ngram
listlengths
0
67.8k
[ "{square.is_square()}\") # child method print(f\"square.area() = {square.area()}\") # parent method # rec destroyed", "self.width * self.height class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width, height) def", "= {self.height!r})') def __str__(self): return (f'width = {self.width} , ' f'height = {self.height}')", "be destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r},", "= Rectangle(1, 2) print(rec) # call rec.__str__ if __str__ is defined, otherwise call", "__init__(self, width, height): Rectangle.__init__(self, width, height) def is_square(self): return self.width == self.height def", "# child method print(f\"square.area() = {square.area()}\") # parent method # rec destroyed here", "{self.height}') def area(self): return self.width * self.height class Square(Rectangle): def __init__(self, width, height):", "address # rec destroyed here check_rec() print(\"\") def check_square(): print(\"square\") square = Square(2,", "print(square) # call parent's method print(str(square)) # call parent's method print(repr(square)) # call", "object is about to be destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return", "not defined, returns memory address # rec destroyed here check_rec() print(\"\") def check_square():", "returns memory address # rec destroyed here check_rec() print(\"\") def check_square(): print(\"square\") square", "class def __init__(self, width, height): self.width = width self.height = height def __del__(self):", "# call rec.__str__ if __str__ is defined, otherwise call __repr__ print(str(rec)) # equivalent", "__repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec)) # if rec is not defined,", "(f'width = {self.width} , ' f'height = {self.height}') def area(self): return self.width *", "Rectangle.__init__(self, width, height) def is_square(self): return self.width == self.height def 
check_rec(): print(\"rectangle\") rec", "print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area() = {square.area()}\") # parent method #", "__str__ is defined, otherwise call __repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec)) #", "is defined, otherwise call __repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec)) # if", "# call parent's method print(repr(square)) # call parent's method print(f\"square.is_square() = {square.is_square()}\") #", "def __del__(self): # Called when object is about to be destroyed print(self.__class__.__name__, \"", "height): Rectangle.__init__(self, width, height) def is_square(self): return self.width == self.height def check_rec(): print(\"rectangle\")", "self.height = height def __del__(self): # Called when object is about to be", "* self.height class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width, height) def is_square(self):", "width, height): Rectangle.__init__(self, width, height) def is_square(self): return self.width == self.height def check_rec():", "width self.height = height def __del__(self): # Called when object is about to", "def __repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})') def __str__(self): return", "if __str__ is defined, otherwise call __repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec))", "print(str(square)) # call parent's method print(repr(square)) # call parent's method print(f\"square.is_square() = {square.is_square()}\")", "' f'height = {self.height}') def area(self): return self.width * self.height class Square(Rectangle): def", "# equivalent ot print(rec) print(repr(rec)) # if rec is not defined, returns memory", "return self.width == self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec) #", "self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec) # call rec.__str__ if", 
"print(rec) # call rec.__str__ if __str__ is defined, otherwise call __repr__ print(str(rec)) #", "height): self.width = width self.height = height def __del__(self): # Called when object", "rec = Rectangle(1, 2) print(rec) # call rec.__str__ if __str__ is defined, otherwise", "def __init__(self, width, height): self.width = width self.height = height def __del__(self): #", "self.width = width self.height = height def __del__(self): # Called when object is", "here check_rec() print(\"\") def check_square(): print(\"square\") square = Square(2, 2) print(square) # call", "call parent's method print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area() = {square.area()}\") #", "square = Square(2, 2) print(square) # call parent's method print(str(square)) # call parent's", "call rec.__str__ if __str__ is defined, otherwise call __repr__ print(str(rec)) # equivalent ot", "def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec) # call rec.__str__ if __str__", "__repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})') def __str__(self): return (f'width", "{self.height!r})') def __str__(self): return (f'width = {self.width} , ' f'height = {self.height}') def", ", ' f'height = {self.height}') def area(self): return self.width * self.height class Square(Rectangle):", "def area(self): return self.width * self.height class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self,", "is_square(self): return self.width == self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec)", "= {self.width!r}, height = {self.height!r})') def __str__(self): return (f'width = {self.width} , '", "is not defined, returns memory address # rec destroyed here check_rec() print(\"\") def", "= {self.height}') def area(self): return self.width * self.height class Square(Rectangle): def __init__(self, width,", "# call parent's method print(str(square)) # call 
parent's method print(repr(square)) # call parent's", "call parent's method print(repr(square)) # call parent's method print(f\"square.is_square() = {square.is_square()}\") # child", "def is_square(self): return self.width == self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2)", "print(str(rec)) # equivalent ot print(rec) print(repr(rec)) # if rec is not defined, returns", "Called when object is about to be destroyed print(self.__class__.__name__, \" is destroyed\") def", "destroyed here check_rec() print(\"\") def check_square(): print(\"square\") square = Square(2, 2) print(square) #", "method print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area() = {square.area()}\") # parent method", "= {square.is_square()}\") # child method print(f\"square.area() = {square.area()}\") # parent method # rec", "print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height =", "otherwise call __repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec)) # if rec is", "rec destroyed here check_rec() print(\"\") def check_square(): print(\"square\") square = Square(2, 2) print(square)", "f'(width = {self.width!r}, height = {self.height!r})') def __str__(self): return (f'width = {self.width} ,", "check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec) # call rec.__str__ if __str__ is", "call parent's method print(str(square)) # call parent's method print(repr(square)) # call parent's method", "__str__(self): return (f'width = {self.width} , ' f'height = {self.height}') def area(self): return", "self.height class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width, height) def is_square(self): return", "Square(2, 2) print(square) # call parent's method print(str(square)) # call parent's method print(repr(square))", "destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return 
(f'{self.__class__.__name__}' f'(width = {self.width!r}, height", "Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width, height) def is_square(self): return self.width ==", "width, height) def is_square(self): return self.width == self.height def check_rec(): print(\"rectangle\") rec =", "2) print(rec) # call rec.__str__ if __str__ is defined, otherwise call __repr__ print(str(rec))", "def __str__(self): return (f'width = {self.width} , ' f'height = {self.height}') def area(self):", "{self.width!r}, height = {self.height!r})') def __str__(self): return (f'width = {self.width} , ' f'height", "check_rec() print(\"\") def check_square(): print(\"square\") square = Square(2, 2) print(square) # call parent's", "return (f'width = {self.width} , ' f'height = {self.height}') def area(self): return self.width", "parent's method print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area() = {square.area()}\") # parent", "Rectangle: # define parent class def __init__(self, width, height): self.width = width self.height", "method print(repr(square)) # call parent's method print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area()", "= {self.width} , ' f'height = {self.height}') def area(self): return self.width * self.height", "class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width, height) def is_square(self): return self.width", "2) print(square) # call parent's method print(str(square)) # call parent's method print(repr(square)) #", "defined, returns memory address # rec destroyed here check_rec() print(\"\") def check_square(): print(\"square\")", "is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})') def", "parent's method print(repr(square)) # call parent's method print(f\"square.is_square() = {square.is_square()}\") # child method", "= Square(2, 2) print(square) # 
call parent's method print(str(square)) # call parent's method", "# rec destroyed here check_rec() print(\"\") def check_square(): print(\"square\") square = Square(2, 2)", "area(self): return self.width * self.height class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width,", "print(\"rectangle\") rec = Rectangle(1, 2) print(rec) # call rec.__str__ if __str__ is defined,", "check_square(): print(\"square\") square = Square(2, 2) print(square) # call parent's method print(str(square)) #", "print(rec) print(repr(rec)) # if rec is not defined, returns memory address # rec", "self.width == self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec) # call", "(f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})') def __str__(self): return (f'width = {self.width}", "return self.width * self.height class Square(Rectangle): def __init__(self, width, height): Rectangle.__init__(self, width, height)", "height = {self.height!r})') def __str__(self): return (f'width = {self.width} , ' f'height =", "return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})') def __str__(self): return (f'width =", "def __init__(self, width, height): Rectangle.__init__(self, width, height) def is_square(self): return self.width == self.height", "method print(str(square)) # call parent's method print(repr(square)) # call parent's method print(f\"square.is_square() =", "memory address # rec destroyed here check_rec() print(\"\") def check_square(): print(\"square\") square =", "equivalent ot print(rec) print(repr(rec)) # if rec is not defined, returns memory address", "print(\"\") def check_square(): print(\"square\") square = Square(2, 2) print(square) # call parent's method", "print(\"square\") square = Square(2, 2) print(square) # call parent's method print(str(square)) # call", "= height def __del__(self): # Called when object is about to be destroyed", "when object is about 
to be destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self):", "defined, otherwise call __repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec)) # if rec", "Rectangle(1, 2) print(rec) # call rec.__str__ if __str__ is defined, otherwise call __repr__", "__del__(self): # Called when object is about to be destroyed print(self.__class__.__name__, \" is", "is about to be destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}'", "rec.__str__ if __str__ is defined, otherwise call __repr__ print(str(rec)) # equivalent ot print(rec)", "== self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1, 2) print(rec) # call rec.__str__", "height def __del__(self): # Called when object is about to be destroyed print(self.__class__.__name__,", "height) def is_square(self): return self.width == self.height def check_rec(): print(\"rectangle\") rec = Rectangle(1,", "about to be destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width", "\" is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})')", "{self.width} , ' f'height = {self.height}') def area(self): return self.width * self.height class", "ot print(rec) print(repr(rec)) # if rec is not defined, returns memory address #", "def check_square(): print(\"square\") square = Square(2, 2) print(square) # call parent's method print(str(square))", "print(repr(rec)) # if rec is not defined, returns memory address # rec destroyed", "parent's method print(str(square)) # call parent's method print(repr(square)) # call parent's method print(f\"square.is_square()", "# call parent's method print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area() = {square.area()}\")", "# define parent class def __init__(self, width, height): self.width = width self.height =", "to be 
destroyed print(self.__class__.__name__, \" is destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width =", "destroyed\") def __repr__(self): return (f'{self.__class__.__name__}' f'(width = {self.width!r}, height = {self.height!r})') def __str__(self):", "f'height = {self.height}') def area(self): return self.width * self.height class Square(Rectangle): def __init__(self,", "define parent class def __init__(self, width, height): self.width = width self.height = height", "call __repr__ print(str(rec)) # equivalent ot print(rec) print(repr(rec)) # if rec is not", "print(repr(square)) # call parent's method print(f\"square.is_square() = {square.is_square()}\") # child method print(f\"square.area() =", "rec is not defined, returns memory address # rec destroyed here check_rec() print(\"\")", "child method print(f\"square.area() = {square.area()}\") # parent method # rec destroyed here check_square()", "# Called when object is about to be destroyed print(self.__class__.__name__, \" is destroyed\")", "parent class def __init__(self, width, height): self.width = width self.height = height def", "width, height): self.width = width self.height = height def __del__(self): # Called when", "if rec is not defined, returns memory address # rec destroyed here check_rec()", "= width self.height = height def __del__(self): # Called when object is about", "class Rectangle: # define parent class def __init__(self, width, height): self.width = width", "__init__(self, width, height): self.width = width self.height = height def __del__(self): # Called", "# if rec is not defined, returns memory address # rec destroyed here" ]
[ "--logdir=log`. References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from __future__ import division", "scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10,", "= Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10,", "inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2:", "with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]),", "name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2", "Blundell et al. (2015); Kucukelbir et al. (2016)). 
Inspired by autograd's Bayesian neural", "of features # DATA X_train, y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0", "D = 1 X = np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8, num=N", "neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1)", "= 1 # number of features # DATA X_train, y_train = build_toy_dataset(N) #", "tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]),", "X = np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8, num=N / 2)]) y", "1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"):", "y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"):", "<reponame>xiangze/edward #!/usr/bin/env python \"\"\"Bayesian neural network using variational inference (see, e.g., Blundell et", "tf.reshape(h, [-1]) ed.set_seed(42) N = 40 # number of data points D =", "build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1", "= Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10,", "Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0, 2, num=N /", "= ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2},", "= build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), 
scale=tf.ones([D, 10]), name=\"W_0\")", "Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"),", "Inspired by autograd's Bayesian neural network example. This example prettifies some of the", "inference (see, e.g., Blundell et al. (2015); Kucukelbir et al. (2016)). Inspired by", "neural network using variational inference (see, e.g., Blundell et al. (2015); Kucukelbir et", "X_train, y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D,", "scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X", "tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2", "qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 =", "tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N = 40 # number", "name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with", "- 4.0) / 4.0 X = X.reshape((N, D)) return X, y def neural_network(X):", "D = 1 # number of features # DATA X_train, y_train = build_toy_dataset(N)", "/ 2), np.linspace(6, 8, num=N / 2)]) y = np.cos(X) + np.random.normal(0, noise_std,", "# INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D,", "W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={X: 
X_train, y: y_train}) inference.run(logdir='log')", "#!/usr/bin/env python \"\"\"Bayesian neural network using variational inference (see, e.g., Blundell et al.", "= X.reshape((N, D)) return X, y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) +", "8, num=N / 2)]) y = np.cos(X) + np.random.normal(0, noise_std, size=N) X =", "import print_function import edward as ed import numpy as np import tensorflow as", "= np.cos(X) + np.random.normal(0, noise_std, size=N) X = (X - 4.0) / 4.0", "D)) return X, y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0) h", "the tensor naming for visualization in TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`.", "+ b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N = 40 # number of data", "scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10,", "run `tensorboard --logdir=log`. References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from __future__", "np import tensorflow as tf from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D", "b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1),", "example. 
This example prettifies some of the tensor naming for visualization in TensorBoard.", "name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus(", "tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]),", "with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"):", "= Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus(", "= Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 =", "tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"):", "import Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0, 2, num=N", "num=N / 2)]) y = np.cos(X) + np.random.normal(0, noise_std, size=N) X = (X", "qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"),", "INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( 
tf.Variable(tf.random_normal([D, 10]),", "Kucukelbir et al. (2016)). Inspired by autograd's Bayesian neural network example. This example", "= Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1),", "as np import tensorflow as tf from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1):", "name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus(", "edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0, 2,", "= tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") #", "with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 =", "10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\")))", "al. (2015); Kucukelbir et al. (2016)). 
Inspired by autograd's Bayesian neural network example.", "num=N / 2), np.linspace(6, 8, num=N / 2)]) y = np.cos(X) + np.random.normal(0,", "# number of data points D = 1 # number of features #", "name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N, D], name=\"X\") y", "1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10),", "b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N, D], name=\"X\") y =", "name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1:", "W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]),", "= 40 # number of data points D = 1 # number of", "import edward as ed import numpy as np import tensorflow as tf from", "name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\")", "\"\"\"Bayesian neural network using variational inference (see, e.g., Blundell et al. (2015); Kucukelbir", "neural network example. 
This example prettifies some of the tensor naming for visualization", "import division from __future__ import print_function import edward as ed import numpy as", "np.cos(X) + np.random.normal(0, noise_std, size=N) X = (X - 4.0) / 4.0 X", "W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h, W_2)", "qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"),", "N = 40 # number of data points D = 1 # number", "Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0", "ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={X:", "by autograd's Bayesian neural network example. This example prettifies some of the tensor", "view TensorBoard, run `tensorboard --logdir=log`. References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import", "name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1", "build_toy_dataset(N=40, noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6,", "(2016)). Inspired by autograd's Bayesian neural network example. This example prettifies some of", "(see, e.g., Blundell et al. (2015); Kucukelbir et al. (2016)). Inspired by autograd's", "h = tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1) h", "print_function import edward as ed import numpy as np import tensorflow as tf", "[-1]) ed.set_seed(42) N = 40 # number of data points D = 1", "variational inference (see, e.g., Blundell et al. (2015); Kucukelbir et al. (2016)). 
Inspired", "np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8, num=N / 2)]) y = np.cos(X)", "autograd's Bayesian neural network example. This example prettifies some of the tensor naming", "in TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`. References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from", "Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 =", "= Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\")", "2), np.linspace(6, 8, num=N / 2)]) y = np.cos(X) + np.random.normal(0, noise_std, size=N)", "absolute_import from __future__ import division from __future__ import print_function import edward as ed", "tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with", "name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2,", "features # DATA X_train, y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 =", "Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]),", "This example prettifies some of the tensor naming for visualization in TensorBoard. To", "name=\"b_2\") X = tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N),", "(2015); Kucukelbir et al. (2016)). Inspired by autograd's Bayesian neural network example. 
This", "with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\")))", "np.linspace(6, 8, num=N / 2)]) y = np.cos(X) + np.random.normal(0, noise_std, size=N) X", "name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with", "10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"),", "qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0,", "qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={X: X_train, y: y_train})", "\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "naming for visualization in TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`. 
References ----------", "from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0,", "X = X.reshape((N, D)) return X, y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0)", "MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10,", "scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference", "of data points D = 1 # number of features # DATA X_train,", "division from __future__ import print_function import edward as ed import numpy as np", "tf from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1 X =", "points D = 1 # number of features # DATA X_train, y_train =", "y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1)", "al. (2016)). Inspired by autograd's Bayesian neural network example. This example prettifies some", "of the tensor naming for visualization in TensorBoard. 
To view TensorBoard, run `tensorboard", "10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]),", "b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32,", "/ 4.0 X = X.reshape((N, D)) return X, y def neural_network(X): h =", "tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1", "scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1,", "__future__ import division from __future__ import print_function import edward as ed import numpy", "noise_std, size=N) X = (X - 4.0) / 4.0 X = X.reshape((N, D))", "= tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h, W_2) + b_2 return tf.reshape(h,", "tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0:", "et al. (2015); Kucukelbir et al. (2016)). Inspired by autograd's Bayesian neural network", "scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10),", "`tensorboard --logdir=log`. References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from __future__ import", "network example. 
This example prettifies some of the tensor naming for visualization in", "qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 =", "scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with", "= Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\")", "X = tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\")", "from __future__ import division from __future__ import print_function import edward as ed import", "X.reshape((N, D)) return X, y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0)", "+ b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h, W_2) +", "tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h,", "scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10,", "/ 2)]) y = np.cos(X) + np.random.normal(0, noise_std, size=N) X = (X -", "= Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X),", "edward as ed import numpy as np import tensorflow as tf from edward.models", "= np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8, num=N / 2)]) y =", "data points D = 1 # number of features # DATA X_train, y_train", "40 # number of data points D = 1 # number of features", "2)]) y = np.cos(X) + np.random.normal(0, noise_std, size=N) X = (X - 4.0)", 
"Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10),", "for visualization in TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`. References ---------- http://edwardlib.org/tutorials/bayesian-neural-network", "# DATA X_train, y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D,", "tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10,", "= tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N = 40 #", "return X, y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0) h =", "ed.set_seed(42) N = 40 # number of data points D = 1 #", "using variational inference (see, e.g., Blundell et al. (2015); Kucukelbir et al. (2016)).", "Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]),", "+ b_1) h = tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N", "h = tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N = 40", "network using variational inference (see, e.g., Blundell et al. 
(2015); Kucukelbir et al.", "__future__ import print_function import edward as ed import numpy as np import tensorflow", "10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\")))", "import numpy as np import tensorflow as tf from edward.models import Normal def", "= Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus(", "W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]),", "Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1", "TensorBoard, run `tensorboard --logdir=log`. 
References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from", "name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\")))", "Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N, D],", "name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus(", "__future__ import absolute_import from __future__ import division from __future__ import print_function import edward", "as ed import numpy as np import tensorflow as tf from edward.models import", "qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={X: X_train,", "def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1) +", "To view TensorBoard, run `tensorboard --logdir=log`. 
References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import", "10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10),", "1 # number of features # DATA X_train, y_train = build_toy_dataset(N) # MODEL", "= 1 X = np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8, num=N /", "np.random.normal(0, noise_std, size=N) X = (X - 4.0) / 4.0 X = X.reshape((N,", "tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]),", "= Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]),", "References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from __future__ import division from", "= tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1) h =", "DATA X_train, y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]),", "def build_toy_dataset(N=40, noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0, 2, num=N / 2),", "TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`. 
References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__", "2, num=N / 2), np.linspace(6, 8, num=N / 2)]) y = np.cos(X) +", "Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1,", "scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D,", "W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N = 40 # number of", "Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]),", "prettifies some of the tensor naming for visualization in TensorBoard. To view TensorBoard,", "y = np.cos(X) + np.random.normal(0, noise_std, size=N) X = (X - 4.0) /", "10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"),", "= Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1:", "W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1", "10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 =", "some of the tensor naming for visualization in TensorBoard. 
To view TensorBoard, run", "tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1])", "4.0 X = X.reshape((N, D)) return X, y def neural_network(X): h = tf.tanh(tf.matmul(X,", "name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10,", "import absolute_import from __future__ import division from __future__ import print_function import edward as", "scale=tf.ones([10, 1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\")", "tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE", "y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]),", "= Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N,", "numpy as np import tensorflow as tf from edward.models import Normal def build_toy_dataset(N=40,", "Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]),", "return tf.reshape(h, [-1]) ed.set_seed(42) N = 40 # number of data points D", "# number of features # DATA X_train, y_train = build_toy_dataset(N) # MODEL with", "example prettifies some of the tensor naming for visualization in TensorBoard. 
To view", "b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N = 40 # number of data points", "X, y def neural_network(X): h = tf.tanh(tf.matmul(X, W_0) + b_0) h = tf.tanh(tf.matmul(h,", "Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2", "size=N) X = (X - 4.0) / 4.0 X = X.reshape((N, D)) return", "ed import numpy as np import tensorflow as tf from edward.models import Normal", "h = tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h, W_2) + b_2 return", "number of data points D = 1 # number of features # DATA", "with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 =", "Bayesian neural network example. This example prettifies some of the tensor naming for", "tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2:", "with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0: qW_0,", "noise_std=0.1): D = 1 X = np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8,", "* tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]),", "with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with tf.name_scope(\"qW_1\"):", "1]), name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2", "scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X = 
tf.placeholder(tf.float32, [N, D], name=\"X\")", "[N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with", "name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]),", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]),", "b_0: qb_0, W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2}, data={X: X_train, y:", "qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"): qW_2 =", "= (X - 4.0) / 4.0 X = X.reshape((N, D)) return X, y", "scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\")))", "W_1) + b_1) h = tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42)", "import tensorflow as tf from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D =", "visualization in TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`. 
References ---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\"", "from __future__ import print_function import edward as ed import numpy as np import", "X = (X - 4.0) / 4.0 X = X.reshape((N, D)) return X,", "as tf from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1 X", "name=\"W_2\") b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 =", "tensorflow as tf from edward.models import Normal def build_toy_dataset(N=40, noise_std=0.1): D = 1", "tensor naming for visualization in TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`. References", "et al. (2016)). Inspired by autograd's Bayesian neural network example. This example prettifies", "with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with tf.name_scope(\"qW_2\"):", "10]), scale=tf.ones([10, 10]), name=\"W_1\") W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name=\"W_2\") b_0 =", "e.g., Blundell et al. (2015); Kucukelbir et al. (2016)). 
Inspired by autograd's Bayesian", "---------- http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from __future__ import division from __future__", "1 X = np.concatenate([np.linspace(0, 2, num=N / 2), np.linspace(6, 8, num=N / 2)])", "b_0) h = tf.tanh(tf.matmul(h, W_1) + b_1) h = tf.matmul(h, W_2) + b_2", "= Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0", "number of features # DATA X_train, y_train = build_toy_dataset(N) # MODEL with tf.name_scope(\"model\"):", "tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"),", "name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\") X =", "(X - 4.0) / 4.0 X = X.reshape((N, D)) return X, y def", "4.0) / 4.0 X = X.reshape((N, D)) return X, y def neural_network(X): h", "scale=tf.ones(1), name=\"b_2\") X = tf.placeholder(tf.float32, [N, D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 *", "# MODEL with tf.name_scope(\"model\"): W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name=\"W_0\") W_1 =", "b_1) h = tf.matmul(h, W_2) + b_2 return tf.reshape(h, [-1]) ed.set_seed(42) N =", "tf.name_scope(\"posterior\"): with tf.name_scope(\"qW_0\"): qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([D, 10]), name=\"scale\"))) with", "1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus(", "http://edwardlib.org/tutorials/bayesian-neural-network \"\"\" from __future__ import absolute_import from __future__ import division from __future__ 
import", "name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference = ed.KLqp({W_0:", "+ np.random.normal(0, noise_std, size=N) X = (X - 4.0) / 4.0 X =", "D], name=\"X\") y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name=\"y\") # INFERENCE with tf.name_scope(\"posterior\"):", "tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_2\"): qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([1]), name=\"scale\"))) inference =", "tf.name_scope(\"qW_2\"): qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 1]), name=\"scale\"))) with tf.name_scope(\"qb_0\"): qb_0", "python \"\"\"Bayesian neural network using variational inference (see, e.g., Blundell et al. (2015);", "name=\"scale\"))) with tf.name_scope(\"qW_1\"): qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10, 10]), name=\"scale\"))) with", "name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\"))) with tf.name_scope(\"qb_1\"): qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name=\"loc\"), scale=tf.nn.softplus( tf.Variable(tf.random_normal([10]), name=\"scale\")))", "Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_0\") b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name=\"b_1\") b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name=\"b_2\")" ]
[ "get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\"", "import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script,", "modifications \"\"\" script_paths = [] for cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir):", "ignored -> check if @tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert", "convert system dependant slashes script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths", "tracked for modifications \"\"\" script_paths = [] for cmd_dir in cmd_dirs: for entry", "to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts)", "print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts, modified_scripts =", "modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts, modified_scripts", "= os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path):", "file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which logs management scripts executions \"\"\" @wraps(decorated_func) def", "else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns", 
"_get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking", "= decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not", "print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count} new and {mod_count} modified management scripts", "management scripts:{NC}\\n' ' You have {new_count} new and {mod_count} modified management scripts to", "= get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script", "scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf", "@wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path =", "if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies = build_dependencies_dict(new_scripts +", "new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and", "dependant slashes script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts():", "inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): 
mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return", "wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts", "import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from", "+ modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts,", "return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts (list of script", "= True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts (list", "settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\", "get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator", "import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func):", "new and {mod_count} modified management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if", "prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts()", "if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True", "from 
django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def", "mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a", "script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts)", "not is_script_ignored(entry): # script file is not ignored -> check if @tracked_script decorator", "run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS,", "not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return wrapper def", "functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes", "\"\"\" script_paths = [] for cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir): if", "inspect import os from functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts", "build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings", "script paths) that should be tracked for modifications \"\"\" script_paths = [] for", "is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths = 
_get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths)", "({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and", "list of scripts (list of script paths) that should be tracked for modifications", "for cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir): if not is_script_ignored(entry): # script", "return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have", "tracked_script(decorated_func): \"\"\" Decorator which logs management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs):", "= build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf import", "get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts,", "decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path =", "CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts", "of scripts (list of script paths) that should be tracked for modifications \"\"\"", "@tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path", "get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, 
modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies", "is_script_ignored(entry): # script file is not ignored -> check if @tracked_script decorator is", "which logs management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args,", "has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs):", "script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return", "decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path):", "print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import", "import inspect import os from functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies,", "= filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' '", "and {mod_count} modified management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts):", "if @tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant slashes", "and len(new_scripts + modified_scripts): scripts_dependencies = 
build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def", "wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts (list of script paths)", "len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender,", "filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You", "show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count} new and {mod_count} modified management", "= inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path)", "cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir): if not is_script_ignored(entry): # script file", "return result wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list", "have {new_count} new and {mod_count} modified management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts),", "(new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count}", "logs management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs)", "modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from", "entry in 
os.listdir(cmd_dir): if not is_script_ignored(entry): # script file is not ignored ->", "get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script =", "scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS))", "**kwargs): from django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings,", "CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return", "scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts", "file is not ignored -> check if @tracked_script decorator is present cmd_dir =", "return script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts,", "') def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts,", "len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts)", "filter_unapplied_scripts from django_scripts_tracker.utils 
import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path,", "hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which logs management scripts", "is not ignored -> check if @tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/'))", "AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts", "script file is not ignored -> check if @tracked_script decorator is present cmd_dir", "not ignored -> check if @tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) #", "= [] for cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir): if not is_script_ignored(entry):", "django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and", "def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def", "You have {new_count} new and {mod_count} modified management scripts to be applied.'.format( new_count=len(new_scripts),", "mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts", "should be tracked for modifications \"\"\" script_paths = [] for cmd_dir in cmd_dirs:", "scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args, 
**kwargs) script_module =", "if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return wrapper", "os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts,", "({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts +", "for entry in os.listdir(cmd_dir): if not is_script_ignored(entry): # script file is not ignored", "os.listdir(cmd_dir): if not is_script_ignored(entry): # script file is not ignored -> check if", "scripts (list of script paths) that should be tracked for modifications \"\"\" script_paths", "**TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts),", "be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if", "django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS", "import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from", "in cmd_dirs: for entry in os.listdir(cmd_dir): if not is_script_ignored(entry): # script 
file is", "if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS))", "django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored,", "get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which logs management scripts executions \"\"\"", "django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path):", "django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils", "be tracked for modifications \"\"\" script_paths = [] for cmd_dir in cmd_dirs: for", "is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path = os.path.join(cmd_dir,", "cmd_dirs: for entry in os.listdir(cmd_dir): if not is_script_ignored(entry): # script file is not", "hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts)", "new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management", "system dependant slashes 
script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def", "<gh_stars>1-10 import inspect import os from functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict,", "import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_", "is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path)", "modified management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts", "import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import", "= get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which logs management scripts executions", "{mod_count} modified management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New", "import os from functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from", "\"\"\" Decorator which logs management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result", "for modifications \"\"\" script_paths = [] for cmd_dir in cmd_dirs: for entry in", "modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and 
settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run',", "from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS,", "\"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path", "True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts (list of", "script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths =", "' You have {new_count} new and {mod_count} modified management scripts to be applied.'.format(", "scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies", "of script paths) that should be tracked for modifications \"\"\" script_paths = []", "\"\"\" Returns a list of scripts (list of script paths) that should be", "mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\"", "print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if", "script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else: mark_script_as_applied(script_path) return result", "def 
print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count} new and", "executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func)", "print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts +", "show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ')", "from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import", "_get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts (list of script paths) that should", "Returns a list of scripts (list of script paths) that should be tracked", "= os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS)", "-> check if @tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system", "# script file is not ignored -> check if @tracked_script decorator is present", "result wrapper._is_tracked_script = True return wrapper def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of", "check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if", "result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) 
script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if", "def tracked_script(decorated_func): \"\"\" Decorator which logs management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args,", "if not is_script_ignored(entry): # script file is not ignored -> check if @tracked_script", "AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which logs management scripts executions \"\"\" @wraps(decorated_func)", "modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n'", "script_paths = [] for cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir): if not", "**TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts)", "Decorator which logs management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result =", "import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from", "os from functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin", "new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified", "mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def 
tracked_script(decorated_func): \"\"\" Decorator which logs management", "= _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False):", "print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies,", "django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def", "if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix='", "from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_)", "from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import", "in os.listdir(cmd_dir): if not is_script_ignored(entry): # script file is not ignored -> check", "script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts = filter_unapplied_scripts(script_paths) return (new_scripts, modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts,", "print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies =", "scripts:{NC}\\n' ' You have 
{new_count} new and {mod_count} modified management scripts to be", "+ modified_scripts): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs):", "print_scripts def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which", "entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts", "django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts", "modified_scripts) def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count} new", "**kwargs): result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS:", "def _get_trackable_scripts(cmd_dirs): \"\"\" Returns a list of scripts (list of script paths) that", "wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import has_uncommited_changes from django_scripts_tracker.models", "if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts):", "script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if 
CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path) else:", "a list of scripts (list of script paths) that should be tracked for", "check if @tracked_script decorator is present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant", "from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from", "def mark_script_as_applied(script_path): hash_ = get_hash(script_path) AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_) def tracked_script(decorated_func): \"\"\" Decorator which logs", "print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count} new and {mod_count}", "# convert system dependant slashes script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return", "len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts + modified_scripts): scripts_dependencies", "os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path)", "applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts):", "has_uncommited_changes from django_scripts_tracker.models import AppliedManagementScripts from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from 
django_scripts_tracker.tracker_commons", "def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts)", "print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies =", "if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths = _get_trackable_scripts(COMMANDS_DIRS) new_scripts, modified_scripts =", "build_dependencies_dict(new_scripts + modified_scripts) print_dependencies(scripts_dependencies, prefix=' ') def check_scripts_signal_handler(sender, **kwargs): from django.conf import settings", "def wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module)", "from django.conf import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER')", "'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) run_scripts(scripts_dependencies,", "**TERM_COLORS)) print_scripts(new_scripts) if len(modified_scripts): print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS)) print_scripts(modified_scripts) if show_dependencies and len(new_scripts", "paths) that should be tracked for modifications \"\"\" script_paths = [] for cmd_dir", "**kwargs) script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if CHECK_SCRIPT_GIT_STATUS: if not has_uncommited_changes(script_path): mark_script_as_applied(script_path)", 
"COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash,", "[] for cmd_dir in cmd_dirs: for entry in os.listdir(cmd_dir): if not is_script_ignored(entry): #", "management scripts executions \"\"\" @wraps(decorated_func) def wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs) script_module", "that should be tracked for modifications \"\"\" script_paths = [] for cmd_dir in", "slashes script_path = os.path.join(cmd_dir, entry) if is_tracked_script(script_path): script_paths.append(script_path) return script_paths def get_unapplied_scripts(): script_paths", "= get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False):", "and settings.SCRIPTS_TRACKER \\ and settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts) run_scripts(scripts_dependencies, True)", "from functools import wraps from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts from django_scripts_tracker.git_plugin import", "present cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path = os.path.join(cmd_dir, entry)", "is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts def mark_script_as_applied(script_path): hash_ =", "TERM_COLORS, CHECK_SCRIPT_GIT_STATUS from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts from django_scripts_tracker.utils import get_hash, get_script_path,", "modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \\ and 
settings.SCRIPTS_TRACKER.get('auto_run', False): scripts_dependencies = build_dependencies_dict(new_scripts", "(list of script paths) that should be tracked for modifications \"\"\" script_paths =", "management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS)) if len(new_scripts): print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts),", "import settings new_scripts, modified_scripts = get_unapplied_scripts() print_new_and_modified_scripts(new_scripts, modified_scripts) if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER", "{new_count} new and {mod_count} modified management scripts to be applied.'.format( new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS))", "cmd_dir = os.path.join(*cmd_dir.split('/')) # convert system dependant slashes script_path = os.path.join(cmd_dir, entry) if", "wrapper(*args, **kwargs): result = decorated_func(*args, **kwargs) script_module = inspect.getmodule(decorated_func) script_path = get_script_path(script_module) if", "modified_scripts, show_dependencies=False): print('{LIGHT_CYAN}Checking management scripts:{NC}\\n' ' You have {new_count} new and {mod_count} modified" ]
[ "'5;' for the colours other than green to make them blink. The next", "colours = [31, 33, 34, 35, 36, 37] # Characters to use for", "strings as being more than one character long (15 & 10 for baubles", "the tree by 2. width += 2 # Put the characters for the", "the width of the tree by 2. width += 2 # Put the", "import choice from random import random def main(): \"\"\"Make the tree and print", "baubles and leaves).\"\"\" # Loop from (size - 1) down to 0, using", "codes. colours = [31, 33, 34, 35, 36, 37] # Characters to use", "\"nn\" is the width of the line), with these ansi codes. This is", "main(): \"\"\"Make the tree and print it.\"\"\" # If you change this, use", "2 lines and return. return tree + \"{0}{1}\\n\".format(' ' * (size - 1),", "Add a \"trunk\" of 2 lines and return. return tree + \"{0}{1}\\n\".format(' '", "with these. # The chr(169) and chr(174) characters may not work in all", "(extended ASCII, c and r in a circle). decs = ['@', '&', '*',", "christmas.py Prints a christmas tree on the terminal using coloured and blinking characters.", "string to the line, with padding. tree += \"{0}{1}\\n\".format(' ' * pad, temp)", "green. prob_gr = 0.6 # Colour codes. colours = [31, 33, 34, 35,", "leaves. if random() < prob_gr: temp += leaf # And also some baubles.", "of the tree by 2. width += 2 # Put the characters for", "the escape code. We pass '5;' for the colours other than green to", "character long (15 & 10 for baubles and leaves).\"\"\" # Loop from (size", "to make them blink. The next part is the colour code and the", "\"\"\" We can't use the normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is", "# Increase the width of the tree by 2. width += 2 #", "j in range(width): # Make some leaves. if random() < prob_gr: temp +=", "an odd number. 
SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\"", "random def main(): \"\"\"Make the tree and print it.\"\"\" # If you change", "Characters to use for decorations. Experiment with these. # The chr(169) and chr(174)", "all terminals # (extended ASCII, c and r in a circle). decs =", "and chr(174) characters may not work in all terminals # (extended ASCII, c", "that a character will be green. prob_gr = 0.6 # Colour codes. colours", "lines and return. return tree + \"{0}{1}\\n\".format(' ' * (size - 1), \"000\")", "some leaves. if random() < prob_gr: temp += leaf # And also some", "0, using the counter as the padding size. for pad in range(size -", "pass \"\\033[0m\" after each character. Python 3 version by antiloquax (2015), based on", "\"\"\"Creates the tree string.\"\"\" # Probability that a character will be green. prob_gr", "Experiment with these. # The chr(169) and chr(174) characters may not work in", "2 each time. width = 1 # Initialise the tree string, with a", "by antiloquax (2015), based on code from datamungeblog.com. \"\"\" from random import choice", "blink_col.format(choice(colours), choice(decs)) # Add that string to the line, with padding. tree +=", "these. # The chr(169) and chr(174) characters may not work in all terminals", "with padding. tree += \"{0}{1}\\n\".format(' ' * pad, temp) # Add a \"trunk\"", "# And also some baubles. else: temp += blink_col.format(choice(colours), choice(decs)) # Add that", "star at the top. tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop", "leaf # And also some baubles. else: temp += blink_col.format(choice(colours), choice(decs)) # Add", "return tree + \"{0}{1}\\n\".format(' ' * (size - 1), \"000\") * 2 if", "0.6 # Colour codes. colours = [31, 33, 34, 35, 36, 37] #", "than one character long (15 & 10 for baubles and leaves).\"\"\" # Loop", "colours other than green to make them blink. The next part is the", "terminal using coloured and blinking characters. 
Uses ansi terminal escape sequences. The '\\033['", "def main(): \"\"\"Make the tree and print it.\"\"\" # If you change this,", "decorations. Experiment with these. # The chr(169) and chr(174) characters may not work", "grow by 2 each time. width = 1 # Initialise the tree string,", "line), with these ansi codes. This is because Python sees the strings as", "+= \"{0}{1}\\n\".format(' ' * pad, temp) # Add a \"trunk\" of 2 lines", "is the escape code. We pass '5;' for the colours other than green", "if random() < prob_gr: temp += leaf # And also some baubles. else:", "= 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability that a", "to use for decorations. Experiment with these. # The chr(169) and chr(174) characters", "' * pad, temp) # Add a \"trunk\" of 2 lines and return.", "also some baubles. else: temp += blink_col.format(choice(colours), choice(decs)) # Add that string to", "c and r in a circle). decs = ['@', '&', '*', chr(169), chr(174)]", "width = 1 # Initialise the tree string, with a star at the", "string, with a star at the top. tree = \"\\n{}*\\n\".format(' ' * (size))", "chr(174) characters may not work in all terminals # (extended ASCII, c and", "Loop starts now.\"\"\" \"\"\" We can't use the normal \"format\" centering approach: (\"{:^nn}\".format(string)", "python \"\"\" christmas.py Prints a christmas tree on the terminal using coloured and", "top. tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop starts now.\"\"\" \"\"\"", "the colour we pass \"\\033[0m\" after each character. Python 3 version by antiloquax", "the tree string.\"\"\" # Probability that a character will be green. prob_gr =", "= 0.6 # Colour codes. colours = [31, 33, 34, 35, 36, 37]", "the terminal using coloured and blinking characters. Uses ansi terminal escape sequences. The", "with a star at the top. tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\"", "a green octothorpe ('#'). 
leaf = \"\\033[32m#\\033[0m\" # Width of the tree, will", "# The chr(169) and chr(174) characters may not work in all terminals #", "code. We pass '5;' for the colours other than green to make them", "# Put the characters for the line in \"temp\". temp = \"\" for", "# Width of the tree, will grow by 2 each time. width =", "Loop from (size - 1) down to 0, using the counter as the", "\"\"\"Make the tree and print it.\"\"\" # If you change this, use an", "christmas tree on the terminal using coloured and blinking characters. Uses ansi terminal", "colour we pass \"\\033[0m\" after each character. Python 3 version by antiloquax (2015),", "33, 34, 35, 36, 37] # Characters to use for decorations. Experiment with", "may not work in all terminals # (extended ASCII, c and r in", "a \"trunk\" of 2 lines and return. return tree + \"{0}{1}\\n\".format(' ' *", "to print a green octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" # Width of the", "the sequence. To reset the colour we pass \"\\033[0m\" after each character. Python", "1 # Initialise the tree string, with a star at the top. tree", "green to make them blink. The next part is the colour code and", "import random def main(): \"\"\"Make the tree and print it.\"\"\" # If you", "def makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability that a character will be", "# Format string for printing blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to", "for baubles and leaves).\"\"\" # Loop from (size - 1) down to 0,", "chr(174)] # Format string for printing blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String", "# If you change this, use an odd number. SIZE = 21 print(makeTree(SIZE))", "SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability that", "tree string, with a star at the top. 
tree = \"\\n{}*\\n\".format(' ' *", "now.\"\"\" \"\"\" We can't use the normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\"", "'\\033[' part is the escape code. We pass '5;' for the colours other", "will grow by 2 each time. width = 1 # Initialise the tree", "- 1) down to 0, using the counter as the padding size. for", "character. Python 3 version by antiloquax (2015), based on code from datamungeblog.com. \"\"\"", "+= leaf # And also some baubles. else: temp += blink_col.format(choice(colours), choice(decs)) #", "tree by 2. width += 2 # Put the characters for the line", "'&', '*', chr(169), chr(174)] # Format string for printing blinking characters. blink_col =", "# Make some leaves. if random() < prob_gr: temp += leaf # And", "& 10 for baubles and leaves).\"\"\" # Loop from (size - 1) down", "choice(decs)) # Add that string to the line, with padding. tree += \"{0}{1}\\n\".format('", "down to 0, using the counter as the padding size. for pad in", "pad in range(size - 1, -1, -1): # Increase the width of the", "\"\\033[32m#\\033[0m\" # Width of the tree, will grow by 2 each time. width", "ends the sequence. To reset the colour we pass \"\\033[0m\" after each character.", "the characters for the line in \"temp\". temp = \"\" for j in", "range(width): # Make some leaves. if random() < prob_gr: temp += leaf #", "make them blink. The next part is the colour code and the 'm'", "being more than one character long (15 & 10 for baubles and leaves).\"\"\"", "# Add a \"trunk\" of 2 lines and return. return tree + \"{0}{1}\\n\".format('", "part is the escape code. We pass '5;' for the colours other than", "'m' ends the sequence. To reset the colour we pass \"\\033[0m\" after each", "1) down to 0, using the counter as the padding size. for pad", "(2015), based on code from datamungeblog.com. \"\"\" from random import choice from random", "characters. Uses ansi terminal escape sequences. 
The '\\033[' part is the escape code.", "width of the line), with these ansi codes. This is because Python sees", "= [31, 33, 34, 35, 36, 37] # Characters to use for decorations.", "not work in all terminals # (extended ASCII, c and r in a", "Probability that a character will be green. prob_gr = 0.6 # Colour codes.", "to 0, using the counter as the padding size. for pad in range(size", "['@', '&', '*', chr(169), chr(174)] # Format string for printing blinking characters. blink_col", "line in \"temp\". temp = \"\" for j in range(width): # Make some", "escape sequences. The '\\033[' part is the escape code. We pass '5;' for", "padding. tree += \"{0}{1}\\n\".format(' ' * pad, temp) # Add a \"trunk\" of", "= 1 # Initialise the tree string, with a star at the top.", "temp += leaf # And also some baubles. else: temp += blink_col.format(choice(colours), choice(decs))", "version by antiloquax (2015), based on code from datamungeblog.com. \"\"\" from random import", "for decorations. Experiment with these. # The chr(169) and chr(174) characters may not", "leaves).\"\"\" # Loop from (size - 1) down to 0, using the counter", "be green. prob_gr = 0.6 # Colour codes. colours = [31, 33, 34,", "# Characters to use for decorations. Experiment with these. # The chr(169) and", "Increase the width of the tree by 2. width += 2 # Put", "If you change this, use an odd number. SIZE = 21 print(makeTree(SIZE)) def", "To reset the colour we pass \"\\033[0m\" after each character. Python 3 version", "will be green. prob_gr = 0.6 # Colour codes. colours = [31, 33,", "normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is the width of the line),", "by 2. width += 2 # Put the characters for the line in", "3 version by antiloquax (2015), based on code from datamungeblog.com. \"\"\" from random", "and blinking characters. Uses ansi terminal escape sequences. The '\\033[' part is the", "we pass \"\\033[0m\" after each character. 
Python 3 version by antiloquax (2015), based", "The chr(169) and chr(174) characters may not work in all terminals # (extended", "return. return tree + \"{0}{1}\\n\".format(' ' * (size - 1), \"000\") * 2", "prob_gr: temp += leaf # And also some baubles. else: temp += blink_col.format(choice(colours),", "\"trunk\" of 2 lines and return. return tree + \"{0}{1}\\n\".format(' ' * (size", "r in a circle). decs = ['@', '&', '*', chr(169), chr(174)] # Format", "37] # Characters to use for decorations. Experiment with these. # The chr(169)", "from (size - 1) down to 0, using the counter as the padding", "using the counter as the padding size. for pad in range(size - 1,", "= \"\" for j in range(width): # Make some leaves. if random() <", "random() < prob_gr: temp += leaf # And also some baubles. else: temp", "the 'm' ends the sequence. To reset the colour we pass \"\\033[0m\" after", "with these ansi codes. This is because Python sees the strings as being", "blinking characters. Uses ansi terminal escape sequences. The '\\033[' part is the escape", "\"{0}{1}\\n\".format(' ' * pad, temp) # Add a \"trunk\" of 2 lines and", "code and the 'm' ends the sequence. To reset the colour we pass", "octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" # Width of the tree, will grow by", "choice from random import random def main(): \"\"\"Make the tree and print it.\"\"\"", "of 2 lines and return. return tree + \"{0}{1}\\n\".format(' ' * (size -", "coloured and blinking characters. Uses ansi terminal escape sequences. The '\\033[' part is", "(size - 1) down to 0, using the counter as the padding size.", "next part is the colour code and the 'm' ends the sequence. To", "size. for pad in range(size - 1, -1, -1): # Increase the width", "characters may not work in all terminals # (extended ASCII, c and r", "circle). decs = ['@', '&', '*', chr(169), chr(174)] # Format string for printing", "the tree, will grow by 2 each time. 
width = 1 # Initialise", "approach: (\"{:^nn}\".format(string) where \"nn\" is the width of the line), with these ansi", "' * (size)) \"\"\" Main Loop starts now.\"\"\" \"\"\" We can't use the", "The next part is the colour code and the 'm' ends the sequence.", "use for decorations. Experiment with these. # The chr(169) and chr(174) characters may", "' * (size - 1), \"000\") * 2 if __name__ == \"__main__\": main()", "Initialise the tree string, with a star at the top. tree = \"\\n{}*\\n\".format('", "# String to print a green octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" # Width", "by 2 each time. width = 1 # Initialise the tree string, with", "on the terminal using coloured and blinking characters. Uses ansi terminal escape sequences.", "('#'). leaf = \"\\033[32m#\\033[0m\" # Width of the tree, will grow by 2", "this, use an odd number. SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the", "other than green to make them blink. The next part is the colour", "to the line, with padding. tree += \"{0}{1}\\n\".format(' ' * pad, temp) #", "for j in range(width): # Make some leaves. if random() < prob_gr: temp", "part is the colour code and the 'm' ends the sequence. To reset", "\"{0}{1}\\n\".format(' ' * (size - 1), \"000\") * 2 if __name__ == \"__main__\":", "makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability that a character will be green.", "blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print a green octothorpe ('#').", "36, 37] # Characters to use for decorations. Experiment with these. # The", "- 1, -1, -1): # Increase the width of the tree by 2.", "on code from datamungeblog.com. \"\"\" from random import choice from random import random", "for the colours other than green to make them blink. The next part", "and r in a circle). decs = ['@', '&', '*', chr(169), chr(174)] #", "Width of the tree, will grow by 2 each time. width = 1", "Format string for printing blinking characters. 
blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print", "2. width += 2 # Put the characters for the line in \"temp\".", "Prints a christmas tree on the terminal using coloured and blinking characters. Uses", "ansi terminal escape sequences. The '\\033[' part is the escape code. We pass", "in a circle). decs = ['@', '&', '*', chr(169), chr(174)] # Format string", "\"\\033[5;{0}m{1}\\033[0m\" # String to print a green octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" #", "reset the colour we pass \"\\033[0m\" after each character. Python 3 version by", "blink. The next part is the colour code and the 'm' ends the", "temp) # Add a \"trunk\" of 2 lines and return. return tree +", "sees the strings as being more than one character long (15 & 10", "We pass '5;' for the colours other than green to make them blink.", "= \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop starts now.\"\"\" \"\"\" We can't", "pad, temp) # Add a \"trunk\" of 2 lines and return. return tree", "for pad in range(size - 1, -1, -1): # Increase the width of", "print it.\"\"\" # If you change this, use an odd number. SIZE =", "escape code. We pass '5;' for the colours other than green to make", "tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop starts now.\"\"\" \"\"\" We", "the line in \"temp\". temp = \"\" for j in range(width): # Make", "based on code from datamungeblog.com. \"\"\" from random import choice from random import", "line, with padding. tree += \"{0}{1}\\n\".format(' ' * pad, temp) # Add a", "\"\"\" from random import choice from random import random def main(): \"\"\"Make the", "ansi codes. This is because Python sees the strings as being more than", "because Python sees the strings as being more than one character long (15", "tree, will grow by 2 each time. width = 1 # Initialise the", "is the colour code and the 'm' ends the sequence. To reset the", "pass '5;' for the colours other than green to make them blink. The", "-1): # Increase the width of the tree by 2. 
width += 2", "35, 36, 37] # Characters to use for decorations. Experiment with these. #", "of the line), with these ansi codes. This is because Python sees the", "padding size. for pad in range(size - 1, -1, -1): # Increase the", "the tree and print it.\"\"\" # If you change this, use an odd", "chr(169), chr(174)] # Format string for printing blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" #", "you change this, use an odd number. SIZE = 21 print(makeTree(SIZE)) def makeTree(size):", "terminals # (extended ASCII, c and r in a circle). decs = ['@',", "characters for the line in \"temp\". temp = \"\" for j in range(width):", "2 # Put the characters for the line in \"temp\". temp = \"\"", "odd number. SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\" #", "'*', chr(169), chr(174)] # Format string for printing blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\"", "We can't use the normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is the", "= \"\\033[32m#\\033[0m\" # Width of the tree, will grow by 2 each time.", "(15 & 10 for baubles and leaves).\"\"\" # Loop from (size - 1)", "Python 3 version by antiloquax (2015), based on code from datamungeblog.com. \"\"\" from", "prob_gr = 0.6 # Colour codes. colours = [31, 33, 34, 35, 36,", "# Add that string to the line, with padding. tree += \"{0}{1}\\n\".format(' '", "from random import random def main(): \"\"\"Make the tree and print it.\"\"\" #", "sequences. The '\\033[' part is the escape code. We pass '5;' for the", "# Colour codes. colours = [31, 33, 34, 35, 36, 37] # Characters", "\"temp\". temp = \"\" for j in range(width): # Make some leaves. if", "time. width = 1 # Initialise the tree string, with a star at", "each time. width = 1 # Initialise the tree string, with a star", "each character. Python 3 version by antiloquax (2015), based on code from datamungeblog.com.", "of the tree, will grow by 2 each time. 
width = 1 #", "datamungeblog.com. \"\"\" from random import choice from random import random def main(): \"\"\"Make", "else: temp += blink_col.format(choice(colours), choice(decs)) # Add that string to the line, with", "(\"{:^nn}\".format(string) where \"nn\" is the width of the line), with these ansi codes.", "in range(size - 1, -1, -1): # Increase the width of the tree", "the line, with padding. tree += \"{0}{1}\\n\".format(' ' * pad, temp) # Add", "and return. return tree + \"{0}{1}\\n\".format(' ' * (size - 1), \"000\") *", "at the top. tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop starts", "\"\" for j in range(width): # Make some leaves. if random() < prob_gr:", "decs = ['@', '&', '*', chr(169), chr(174)] # Format string for printing blinking", "tree += \"{0}{1}\\n\".format(' ' * pad, temp) # Add a \"trunk\" of 2", "the tree string, with a star at the top. tree = \"\\n{}*\\n\".format(' '", "a character will be green. prob_gr = 0.6 # Colour codes. colours =", "\"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop starts now.\"\"\" \"\"\" We can't use", "And also some baubles. else: temp += blink_col.format(choice(colours), choice(decs)) # Add that string", "character will be green. prob_gr = 0.6 # Colour codes. colours = [31,", "and leaves).\"\"\" # Loop from (size - 1) down to 0, using the", "and print it.\"\"\" # If you change this, use an odd number. SIZE", "String to print a green octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" # Width of", "a circle). decs = ['@', '&', '*', chr(169), chr(174)] # Format string for", "can't use the normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is the width", "print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability that a character will", "the top. tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main Loop starts now.\"\"\"", "= \"\\033[5;{0}m{1}\\033[0m\" # String to print a green octothorpe ('#'). 
leaf = \"\\033[32m#\\033[0m\"", "\"\\033[0m\" after each character. Python 3 version by antiloquax (2015), based on code", "a star at the top. tree = \"\\n{}*\\n\".format(' ' * (size)) \"\"\" Main", "temp += blink_col.format(choice(colours), choice(decs)) # Add that string to the line, with padding.", "width of the tree by 2. width += 2 # Put the characters", "1, -1, -1): # Increase the width of the tree by 2. width", "and the 'm' ends the sequence. To reset the colour we pass \"\\033[0m\"", "from datamungeblog.com. \"\"\" from random import choice from random import random def main():", "the strings as being more than one character long (15 & 10 for", "tree + \"{0}{1}\\n\".format(' ' * (size - 1), \"000\") * 2 if __name__", "more than one character long (15 & 10 for baubles and leaves).\"\"\" #", "baubles. else: temp += blink_col.format(choice(colours), choice(decs)) # Add that string to the line,", "# Probability that a character will be green. prob_gr = 0.6 # Colour", "10 for baubles and leaves).\"\"\" # Loop from (size - 1) down to", "the padding size. for pad in range(size - 1, -1, -1): # Increase", "Add that string to the line, with padding. tree += \"{0}{1}\\n\".format(' ' *", "it.\"\"\" # If you change this, use an odd number. SIZE = 21", "< prob_gr: temp += leaf # And also some baubles. else: temp +=", "chr(169) and chr(174) characters may not work in all terminals # (extended ASCII,", "the normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is the width of the", "+ \"{0}{1}\\n\".format(' ' * (size - 1), \"000\") * 2 if __name__ ==", "that string to the line, with padding. tree += \"{0}{1}\\n\".format(' ' * pad,", "for printing blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print a green", "blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print a green octothorpe ('#'). 
leaf =", "work in all terminals # (extended ASCII, c and r in a circle).", "[31, 33, 34, 35, 36, 37] # Characters to use for decorations. Experiment", "tree and print it.\"\"\" # If you change this, use an odd number.", "+= blink_col.format(choice(colours), choice(decs)) # Add that string to the line, with padding. tree", "string.\"\"\" # Probability that a character will be green. prob_gr = 0.6 #", "after each character. Python 3 version by antiloquax (2015), based on code from", "use an odd number. SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree", "The '\\033[' part is the escape code. We pass '5;' for the colours", "ASCII, c and r in a circle). decs = ['@', '&', '*', chr(169),", "as the padding size. for pad in range(size - 1, -1, -1): #", "tree string.\"\"\" # Probability that a character will be green. prob_gr = 0.6", "#!/usr/bin/env python \"\"\" christmas.py Prints a christmas tree on the terminal using coloured", "\"\"\" christmas.py Prints a christmas tree on the terminal using coloured and blinking", "use the normal \"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is the width of", "string for printing blinking characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print a", "long (15 & 10 for baubles and leaves).\"\"\" # Loop from (size -", "terminal escape sequences. The '\\033[' part is the escape code. We pass '5;'", "is the width of the line), with these ansi codes. This is because", "Main Loop starts now.\"\"\" \"\"\" We can't use the normal \"format\" centering approach:", "them blink. The next part is the colour code and the 'm' ends", "Uses ansi terminal escape sequences. The '\\033[' part is the escape code. We", "random import choice from random import random def main(): \"\"\"Make the tree and", "as being more than one character long (15 & 10 for baubles and", "using coloured and blinking characters. Uses ansi terminal escape sequences. 
The '\\033[' part", "random import random def main(): \"\"\"Make the tree and print it.\"\"\" # If", "where \"nn\" is the width of the line), with these ansi codes. This", "antiloquax (2015), based on code from datamungeblog.com. \"\"\" from random import choice from", "in range(width): # Make some leaves. if random() < prob_gr: temp += leaf", "width += 2 # Put the characters for the line in \"temp\". temp", "tree on the terminal using coloured and blinking characters. Uses ansi terminal escape", "\"format\" centering approach: (\"{:^nn}\".format(string) where \"nn\" is the width of the line), with", "than green to make them blink. The next part is the colour code", "in \"temp\". temp = \"\" for j in range(width): # Make some leaves.", "centering approach: (\"{:^nn}\".format(string) where \"nn\" is the width of the line), with these", "Make some leaves. if random() < prob_gr: temp += leaf # And also", "code from datamungeblog.com. \"\"\" from random import choice from random import random def", "a christmas tree on the terminal using coloured and blinking characters. Uses ansi", "(size)) \"\"\" Main Loop starts now.\"\"\" \"\"\" We can't use the normal \"format\"", "the width of the line), with these ansi codes. This is because Python", "34, 35, 36, 37] # Characters to use for decorations. Experiment with these.", "temp = \"\" for j in range(width): # Make some leaves. if random()", "codes. This is because Python sees the strings as being more than one", "# (extended ASCII, c and r in a circle). decs = ['@', '&',", "# Loop from (size - 1) down to 0, using the counter as", "some baubles. else: temp += blink_col.format(choice(colours), choice(decs)) # Add that string to the", "\"\"\" Main Loop starts now.\"\"\" \"\"\" We can't use the normal \"format\" centering", "in all terminals # (extended ASCII, c and r in a circle). decs", "the line), with these ansi codes. This is because Python sees the strings", "Put the characters for the line in \"temp\". 
temp = \"\" for j", "* (size)) \"\"\" Main Loop starts now.\"\"\" \"\"\" We can't use the normal", "from random import choice from random import random def main(): \"\"\"Make the tree", "leaf = \"\\033[32m#\\033[0m\" # Width of the tree, will grow by 2 each", "# Initialise the tree string, with a star at the top. tree =", "Python sees the strings as being more than one character long (15 &", "for the line in \"temp\". temp = \"\" for j in range(width): #", "21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability that a character", "characters. blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print a green octothorpe ('#'). leaf", "range(size - 1, -1, -1): # Increase the width of the tree by", "= ['@', '&', '*', chr(169), chr(174)] # Format string for printing blinking characters.", "This is because Python sees the strings as being more than one character", "the colours other than green to make them blink. The next part is", "counter as the padding size. for pad in range(size - 1, -1, -1):", "the colour code and the 'm' ends the sequence. To reset the colour", "-1, -1): # Increase the width of the tree by 2. width +=", "number. SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates the tree string.\"\"\" # Probability", "* pad, temp) # Add a \"trunk\" of 2 lines and return. return", "the counter as the padding size. for pad in range(size - 1, -1,", "one character long (15 & 10 for baubles and leaves).\"\"\" # Loop from", "change this, use an odd number. SIZE = 21 print(makeTree(SIZE)) def makeTree(size): \"\"\"Creates", "print a green octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" # Width of the tree,", "starts now.\"\"\" \"\"\" We can't use the normal \"format\" centering approach: (\"{:^nn}\".format(string) where", "Colour codes. colours = [31, 33, 34, 35, 36, 37] # Characters to", "printing blinking characters. 
blink_col = \"\\033[5;{0}m{1}\\033[0m\" # String to print a green octothorpe", "green octothorpe ('#'). leaf = \"\\033[32m#\\033[0m\" # Width of the tree, will grow", "colour code and the 'm' ends the sequence. To reset the colour we", "sequence. To reset the colour we pass \"\\033[0m\" after each character. Python 3", "these ansi codes. This is because Python sees the strings as being more", "+= 2 # Put the characters for the line in \"temp\". temp =", "is because Python sees the strings as being more than one character long" ]
[ "start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name),", "implements a check function \"\"\" # load plugins dependencies and store them locally", "information \"\"\" from itertools import groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import", "plugin depends 1 other plugin self.http_factory = None def configure(self, conf): \"\"\" Called", "return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for", "class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields from dependencies: plugin depends 1", "self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self,", "'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name') def __request(self):", "JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields from dependencies: plugin depends 1 other", "factory_ctx): \"\"\" Automatically called to create the check function's object :param factory_ctx: (dict)", "to pass the [configuration] section in their plugin info file :param conf: configuration", "[configuration] section in their plugin info file :param conf: configuration dictionary \"\"\" return", "itertools import groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from", "import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin 
import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin):", "import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__()", "Called after plugin is loaded to pass the [configuration] section in their plugin", "Zalando-specific function to query DeployCtl job information \"\"\" from itertools import groupby from", "that implements a check function \"\"\" # load plugins dependencies and store them", "create the check function's object :param factory_ctx: (dict) names available for Function instantiation", "locally for efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx),", "\"\"\" from itertools import groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin,", "from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self):", "key=self.name), key=self.name)) def history(self): return dict((job, list(runs)) for (job, runs) in groupby(sorted(self.__request(), key=self.name),", "Function instantiation :return: an object that implements a check function \"\"\" # load", "in their plugin info file :param conf: configuration dictionary \"\"\" return def create(self,", "-*- coding: utf-8 -*- \"\"\" Zalando-specific function to query DeployCtl job information \"\"\"", "groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import", "def __init__(self, http_wrapper, environment, project, **kwargs): self.url = 
'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper", "from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager", "propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields", "DeployCtl job information \"\"\" from itertools import groupby from operator import itemgetter from", "dependencies: plugin depends 1 other plugin self.http_factory = None def configure(self, conf): \"\"\"", "other plugin self.http_factory = None def configure(self, conf): \"\"\" Called after plugin is", "= http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json()", "__init__(self, http_wrapper, environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params", "import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields from dependencies: plugin", "called to create the check function's object :param factory_ctx: (dict) names available for", "for efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name'))", "project) self.__http = http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name') def __request(self): return", "return dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self):", "plugin info file :param conf: configuration dictionary 
\"\"\" return def create(self, factory_ctx): \"\"\"", "an object that implements a check function \"\"\" # load plugins dependencies and", "create(self, factory_ctx): \"\"\" Automatically called to create the check function's object :param factory_ctx:", "coding: utf-8 -*- \"\"\" Zalando-specific function to query DeployCtl job information \"\"\" from", "\"\"\" Automatically called to create the check function's object :param factory_ctx: (dict) names", "store them locally for efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return", "= kwargs self.name = itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time", "self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper,", "job information \"\"\" from itertools import groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin", "http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def", "plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields from dependencies: plugin depends", "for Function instantiation :return: an object that implements a check function \"\"\" #", "plugin self.http_factory = None def configure(self, conf): \"\"\" Called after plugin is loaded", "is loaded to pass the [configuration] section in their plugin info file :param", "__init__(self): super(JobsFactory, self).__init__() # fields from dependencies: plugin depends 1 other plugin self.http_factory", "from zmon_worker_monitor import plugin_manager class 
JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields from", "them locally for efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper,", "from itertools import groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial", "groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job, list(runs)) for (job, runs) in groupby(sorted(self.__request(),", "-*- \"\"\" Zalando-specific function to query DeployCtl job information \"\"\" from itertools import", "their plugin info file :param conf: configuration dictionary \"\"\" return def create(self, factory_ctx):", "factory_ctx: (dict) names available for Function instantiation :return: an object that implements a", "function to query DeployCtl job information \"\"\" from itertools import groupby from operator", "section in their plugin info file :param conf: configuration dictionary \"\"\" return def", "self.http_wrapper_params = kwargs self.name = itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self):", "in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job, list(runs)) for (job, runs) in", "key=self.name)) def history(self): return dict((job, list(runs)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name))", "= itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name))", "available for Function instantiation :return: an object that implements a check function \"\"\"", "for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job, list(runs)) for", "to query 
DeployCtl job information \"\"\" from itertools import groupby from operator import", "self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name')", "zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() # fields from dependencies:", "check function's object :param factory_ctx: (dict) names available for Function instantiation :return: an", "itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def", "conf): \"\"\" Called after plugin is loaded to pass the [configuration] section in", "self.__http = http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name') def __request(self): return self.__http(self.url,", "self.name = itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago')", "= itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return", "zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory,", "**self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job, runs)", "object :param factory_ctx: (dict) names available for Function instantiation :return: an object that", "if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, 
http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object):", "def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs,", "None def configure(self, conf): \"\"\" Called after plugin is loaded to pass the", "return def create(self, factory_ctx): \"\"\" Automatically called to create the check function's object", "python # -*- coding: utf-8 -*- \"\"\" Zalando-specific function to query DeployCtl job", "to create the check function's object :param factory_ctx: (dict) names available for Function", "Automatically called to create the check function's object :param factory_ctx: (dict) names available", "names available for Function instantiation :return: an object that implements a check function", "def create(self, factory_ctx): \"\"\" Automatically called to create the check function's object :param", "function \"\"\" # load plugins dependencies and store them locally for efficiency if", "configuration dictionary \"\"\" return def create(self, factory_ctx): \"\"\" Automatically called to create the", "environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params = kwargs", "info file :param conf: configuration dictionary \"\"\" return def create(self, factory_ctx): \"\"\" Automatically", "plugins dependencies and store them locally for efficiency if not self.http_factory: self.http_factory =", "lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(),", "project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, 
project)", "itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job,", "**kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params = kwargs self.name =", "1 other plugin self.http_factory = None def configure(self, conf): \"\"\" Called after plugin", "not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def", "utf-8 -*- \"\"\" Zalando-specific function to query DeployCtl job information \"\"\" from itertools", "pass the [configuration] section in their plugin info file :param conf: configuration dictionary", "self).__init__() # fields from dependencies: plugin depends 1 other plugin self.http_factory = None", ":return: an object that implements a check function \"\"\" # load plugins dependencies", "dictionary \"\"\" return def create(self, factory_ctx): \"\"\" Automatically called to create the check", "key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job, list(runs))", "return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs): self.url", "the check function's object :param factory_ctx: (dict) names available for Function instantiation :return:", "a check function \"\"\" # load plugins dependencies and store them locally for", "\"\"\" return def create(self, factory_ctx): \"\"\" Automatically called to create the check function's", "load plugins dependencies and store them locally for efficiency if not 
self.http_factory: self.http_factory", "loaded to pass the [configuration] section in their plugin info file :param conf:", "(dict) names available for Function instantiation :return: an object that implements a check", "def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job, runs) in", "import groupby from operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from zmon_worker_monitor", "check function \"\"\" # load plugins dependencies and store them locally for efficiency", "instantiation :return: an object that implements a check function \"\"\" # load plugins", "<gh_stars>10-100 #!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" Zalando-specific function to query", "and store them locally for efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function')", "project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params = kwargs self.name", "min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job,", "http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment,", "runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job, list(runs)) for (job, runs)", ":param conf: configuration dictionary \"\"\" return def create(self, factory_ctx): \"\"\" Automatically called to", "= None def configure(self, conf): \"\"\" Called after plugin is loaded to pass", "\"\"\" Called after plugin is loaded to pass the [configuration] section in 
their", "IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def __init__(self): super(JobsFactory, self).__init__() #", "\"\"\" # load plugins dependencies and store them locally for efficiency if not", "plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment, project,", "# fields from dependencies: plugin depends 1 other plugin self.http_factory = None def", "self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time)) for (job,", "(job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return dict((job, list(runs)) for (job,", "object that implements a check function \"\"\" # load plugins dependencies and store", "plugin is loaded to pass the [configuration] section in their plugin info file", "= plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment,", "configure(self, conf): \"\"\" Called after plugin is loaded to pass the [configuration] section", "kwargs self.name = itemgetter('name') def __request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time =", ":param factory_ctx: (dict) names available for Function instantiation :return: an object that implements", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" Zalando-specific function to query DeployCtl", "\"\"\" Zalando-specific function to query DeployCtl job information \"\"\" from itertools import groupby", "itemgetter from 
zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class JobsFactory(IFunctionFactoryPlugin): def", "fields from dependencies: plugin depends 1 other plugin self.http_factory = None def configure(self,", "propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs): self.url =", "'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs):", "depends 1 other plugin self.http_factory = None def configure(self, conf): \"\"\" Called after", "# -*- coding: utf-8 -*- \"\"\" Zalando-specific function to query DeployCtl job information", "file :param conf: configuration dictionary \"\"\" return def create(self, factory_ctx): \"\"\" Automatically called", "super(JobsFactory, self).__init__() # fields from dependencies: plugin depends 1 other plugin self.http_factory =", "= 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params = kwargs self.name = itemgetter('name') def", "def __init__(self): super(JobsFactory, self).__init__() # fields from dependencies: plugin depends 1 other plugin", "function's object :param factory_ctx: (dict) names available for Function instantiation :return: an object", "the [configuration] section in their plugin info file :param conf: configuration dictionary \"\"\"", "class JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http", "operator import itemgetter from zmon_worker_monitor.adapters.ifunctionfactory_plugin import 
IFunctionFactoryPlugin, propartial from zmon_worker_monitor import plugin_manager class", "def configure(self, conf): \"\"\" Called after plugin is loaded to pass the [configuration]", "after plugin is loaded to pass the [configuration] section in their plugin info", "__request(self): return self.__http(self.url, **self.http_wrapper_params).json() def lastruns(self): start_time = itemgetter('start_seconds_ago') return dict((job, min(runs, key=start_time))", "dependencies and store them locally for efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http',", "conf: configuration dictionary \"\"\" return def create(self, factory_ctx): \"\"\" Automatically called to create", "# load plugins dependencies and store them locally for efficiency if not self.http_factory:", "efficiency if not self.http_factory: self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function') return propartial(JobsWrapper, http_wrapper=self.http_factory.create(factory_ctx), project=factory_ctx['entity'].get('name')) class", "dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(), key=self.name), key=self.name)) def history(self): return", "query DeployCtl job information \"\"\" from itertools import groupby from operator import itemgetter", "self.http_factory = None def configure(self, conf): \"\"\" Called after plugin is loaded to", "JobsWrapper(object): def __init__(self, http_wrapper, environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http =", "http_wrapper, environment, project, **kwargs): self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project) self.__http = http_wrapper self.http_wrapper_params =", "from dependencies: plugin depends 1 other plugin self.http_factory = None def configure(self, conf):" ]
[ "05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in", "functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776", "16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product =", "<reponame>speyejack/EulersProblems from operator import itemgetter from functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843", "\"\").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for", "import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243", "66896648950445244523161731856403098711121722383113 
62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188", "53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range =", "reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397", "65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 
16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip()", "96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586", "= 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in", "62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606", "itemgetter from functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 
85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749", "24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub)", "85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042", "import itemgetter from functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113", "17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product = max([reduce(lambda", "num = 
\"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482", "from operator import itemgetter from functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511", "13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in range(len(num)-search_range)]])", "84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub", "operator import itemgetter from functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557", "71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range]", "= \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 
30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474", "search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub", "product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in range(len(num)-search_range)]]) print(\"Greatest", "83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13", "52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range", "from functools import reduce num = \"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 
66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866", "07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for", "30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725", "\"\"\"73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881", "= max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in range(len(num)-search_range)]]) print(\"Greatest product:", 
"12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408", "82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\", \"\").strip() search_range = 13 product", "max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in range(len(num)-search_range)]]) print(\"Greatest product: {}\".format(product))", "70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 
71636269561882670428252483600823257530420752963450\"\"\".replace(\"\\n\"," ]
[ "gateway(request): if request.method == 'POST': data = request.POST.get('value') data = json.loads(data) alg =", "== 'POST': data = request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data)", "alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result =", "elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"): result = sstf(data) print(result)", "models import DiskSchedAlg from . utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos =", "import reverse from django.views.decorators.csrf import csrf_exempt import json from django.http import JsonResponse #", "= fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"): result =", "print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data)", "context = {'algos': algos} return render(request, 'disk/index.html',context = context) def detail(request,pk): alg =", "import HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf import csrf_exempt import json from", "= context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return", "def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def", "data = request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result", "def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method == 'POST': data =", "alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result = 
clook(data)", "django.http import JsonResponse # Create your views here. from . models import DiskSchedAlg", "request.method == 'POST': data = request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg)", "reverse from django.views.decorators.csrf import csrf_exempt import json from django.http import JsonResponse # Create", "} return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method ==", "utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context = {'algos': algos}", ". utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context = {'algos':", "render(request, 'disk/index.html',context = context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg,", "import csrf_exempt import json from django.http import JsonResponse # Create your views here.", "# Create your views here. from . models import DiskSchedAlg from . utils", "cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context = {'algos': algos} return render(request,", "django.urls import reverse from django.views.decorators.csrf import csrf_exempt import json from django.http import JsonResponse", "= {'algos': algos} return render(request, 'disk/index.html',context = context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg,", "import JsonResponse # Create your views here. from . models import DiskSchedAlg from", "fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"): result = sstf(data)", "get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt", "DiskSchedAlg from . 
utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context", "from django.shortcuts import get_object_or_404, render from django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse", "django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf import csrf_exempt import json", "JsonResponse # Create your views here. from . models import DiskSchedAlg from .", "Create your views here. from . models import DiskSchedAlg from . utils import", ". models import DiskSchedAlg from . utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos", "alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return", "json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result =", "elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"):", "your views here. from . models import DiskSchedAlg from . 
utils import cscan,clook,", "= request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"):", "result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"): result", "django.shortcuts import get_object_or_404, render from django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse from", "render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method == 'POST': data", "return render(request, 'disk/index.html',context = context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context =", "from . utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context =", "{'algos': algos} return render(request, 'disk/index.html',context = context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk)", "def gateway(request): if request.method == 'POST': data = request.POST.get('value') data = json.loads(data) alg", "import cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context = {'algos': algos} return", "algos} return render(request, 'disk/index.html',context = context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context", "'disk/index.html',context = context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, }", "if request.method == 'POST': data = request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo')", "if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"):", "data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) 
elif(alg==\"CLOOK\"):", "home(request): algos = DiskSchedAlg.objects.all() context = {'algos': algos} return render(request, 'disk/index.html',context = context)", "clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result = look(data)", "csrf_exempt import json from django.http import JsonResponse # Create your views here. from", "from django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf import csrf_exempt import", "json from django.http import JsonResponse # Create your views here. from . models", "django.views.decorators.csrf import csrf_exempt import json from django.http import JsonResponse # Create your views", "render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method == 'POST': data = request.POST.get('value') data =", "DiskSchedAlg.objects.all() context = {'algos': algos} return render(request, 'disk/index.html',context = context) def detail(request,pk): alg", "'POST': data = request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"):", "import get_object_or_404, render from django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf", "render from django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf import csrf_exempt", "= json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result", "{'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method", "here. from . models import DiskSchedAlg from . 
utils import cscan,clook, scan,look,sstf,fcfs def", "= clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result =", "= DiskSchedAlg.objects.all() context = {'algos': algos} return render(request, 'disk/index.html',context = context) def detail(request,pk):", "= cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result =", "result = cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result", "scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all() context = {'algos': algos} return render(request, 'disk/index.html',context", "= scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"): result = sstf(data) print(result) return JsonResponse({'output':result})", "<filename>ossim/disk/views.py from django.shortcuts import get_object_or_404, render from django.http import HttpResponseRedirect,HttpResponse from django.urls import", "views here. from . models import DiskSchedAlg from . 
utils import cscan,clook, scan,look,sstf,fcfs", "result = scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"): result = sstf(data) print(result) return", "= {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if", "context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request):", "result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result", "request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data)", "HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf import csrf_exempt import json from django.http", "import json from django.http import JsonResponse # Create your views here. 
from .", "demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method == 'POST': data = request.POST.get('value')", "request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result = cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result", "from django.urls import reverse from django.views.decorators.csrf import csrf_exempt import json from django.http import", "def home(request): algos = DiskSchedAlg.objects.all() context = {'algos': algos} return render(request, 'disk/index.html',context =", "= get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html')", "@csrf_exempt def gateway(request): if request.method == 'POST': data = request.POST.get('value') data = json.loads(data)", "elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data) elif(alg==\"LOOK\"): result = look(data) elif(alg==\"SSTF\"):", "= request.POST.get('value') data = json.loads(data) alg = request.POST.get('algo') alg=json.loads(alg) print(data) if(alg==\"CSCAN\"): result =", "context) def detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context)", "pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def", "import DiskSchedAlg from . utils import cscan,clook, scan,look,sstf,fcfs def home(request): algos = DiskSchedAlg.objects.all()", "from django.views.decorators.csrf import csrf_exempt import json from django.http import JsonResponse # Create your", "from django.http import JsonResponse # Create your views here. from . 
models import", "detail(request,pk): alg = get_object_or_404(DiskSchedAlg, pk=pk) context = {'alg':alg, } return render(request,'disk/detail.html',context=context) def demo(request):", "algos = DiskSchedAlg.objects.all() context = {'algos': algos} return render(request, 'disk/index.html',context = context) def", "return render(request,'disk/detail.html',context=context) def demo(request): return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method == 'POST':", "cscan(data) elif(alg==\"CLOOK\"): result = clook(data) elif(alg==\"FCFS\"): result = fcfs(data) elif(alg==\"SCAN\"): result = scan(data)", "get_object_or_404, render from django.http import HttpResponseRedirect,HttpResponse from django.urls import reverse from django.views.decorators.csrf import", "return render(request,'disk/disk.html') @csrf_exempt def gateway(request): if request.method == 'POST': data = request.POST.get('value') data", "from . models import DiskSchedAlg from . utils import cscan,clook, scan,look,sstf,fcfs def home(request):" ]
[ "' & '.join(', '.join(i['name'] for i in names).rsplit(', ', 1)) # Other def", "'' if len(names) == 1: return names[0]['name'] if len(names) == 2: return '", "kyu/6 kyu - Format a string of names like 'Bart, Lista e Maggie'.py", "1: return names[0]['name'] if len(names) == 2: return ' & '.join([i['name'] for i", "- Format a string of names like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python", "namelist(names): return ' & '.join(', '.join(i['name'] for i in names).rsplit(', ', 1)) #", "a string of names like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My", "solution def namelist(names): return ' & '.join(', '.join(i['name'] for i in names).rsplit(', ',", "len(names) == 1: return names[0]['name'] if len(names) == 2: return ' & '.join([i['name']", "'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return '", "', 1)) # Other def namelist(names): if not names: return '' if len(names)", "My solution def namelist(names): return ' & '.join(', '.join(i['name'] for i in names).rsplit(',", "of names like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def", "'.join(', '.join(i['name'] for i in names).rsplit(', ', 1)) # Other def namelist(names): if", "for i in names).rsplit(', ', 1)) # Other def namelist(names): if not names:", "https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return ' & '.join(', '.join(i['name'] for i", "names[0]['name'] if len(names) == 2: return ' & '.join([i['name'] for i in names])", "<filename>Python/6 - kyu/6 kyu - Format a string of names like 'Bart, Lista", "return names[0]['name'] if len(names) == 2: return ' & '.join([i['name'] for i in", "if not names: return '' if len(names) == 1: return names[0]['name'] if len(names)", "return ' & '.join([i['name'] 
for i in names]) return names[0]['name'] + ', '", "kyu - Format a string of names like 'Bart, Lista e Maggie'.py #", "Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return ' & '.join(', '.join(i['name']", "Format a string of names like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python #", "len(names) == 2: return ' & '.join([i['name'] for i in names]) return names[0]['name']", "string of names like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution", "i in names).rsplit(', ', 1)) # Other def namelist(names): if not names: return", "like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return", "'.join(i['name'] for i in names).rsplit(', ', 1)) # Other def namelist(names): if not", "== 2: return ' & '.join([i['name'] for i in names]) return names[0]['name'] +", "names like 'Bart, Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names):", "in names).rsplit(', ', 1)) # Other def namelist(names): if not names: return ''", "# My solution def namelist(names): return ' & '.join(', '.join(i['name'] for i in", "== 1: return names[0]['name'] if len(names) == 2: return ' & '.join([i['name'] for", "- kyu/6 kyu - Format a string of names like 'Bart, Lista e", "return '' if len(names) == 1: return names[0]['name'] if len(names) == 2: return", "' & '.join([i['name'] for i in names]) return names[0]['name'] + ', ' +", "& '.join(', '.join(i['name'] for i in names).rsplit(', ', 1)) # Other def namelist(names):", "def namelist(names): return ' & '.join(', '.join(i['name'] for i in names).rsplit(', ', 1))", "# Other def namelist(names): if not names: return '' if len(names) == 1:", "Other def namelist(names): if not names: return '' if len(names) == 1: return", "not 
names: return '' if len(names) == 1: return names[0]['name'] if len(names) ==", "1)) # Other def namelist(names): if not names: return '' if len(names) ==", "if len(names) == 1: return names[0]['name'] if len(names) == 2: return ' &", "e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return ' & '.join(',", "if len(names) == 2: return ' & '.join([i['name'] for i in names]) return", "names: return '' if len(names) == 1: return names[0]['name'] if len(names) == 2:", "# https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return ' & '.join(', '.join(i['name'] for", "Lista e Maggie'.py # https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python # My solution def namelist(names): return ' &", "def namelist(names): if not names: return '' if len(names) == 1: return names[0]['name']", "return ' & '.join(', '.join(i['name'] for i in names).rsplit(', ', 1)) # Other", "namelist(names): if not names: return '' if len(names) == 1: return names[0]['name'] if", "2: return ' & '.join([i['name'] for i in names]) return names[0]['name'] + ',", "names).rsplit(', ', 1)) # Other def namelist(names): if not names: return '' if", "& '.join([i['name'] for i in names]) return names[0]['name'] + ', ' + namelist(names[1:])" ]
[ "2 ** (exp + 1) <= n: exp += 1 print(2 ** exp)", "** 7) n = int(input()) exp = 0 while 2 ** (exp +", "int(input()) exp = 0 while 2 ** (exp + 1) <= n: exp", "sys input = sys.stdin.readline sys.setrecursionlimit(10 ** 7) n = int(input()) exp = 0", "import sys input = sys.stdin.readline sys.setrecursionlimit(10 ** 7) n = int(input()) exp =", "exp = 0 while 2 ** (exp + 1) <= n: exp +=", "= sys.stdin.readline sys.setrecursionlimit(10 ** 7) n = int(input()) exp = 0 while 2", "sys.stdin.readline sys.setrecursionlimit(10 ** 7) n = int(input()) exp = 0 while 2 **", "n = int(input()) exp = 0 while 2 ** (exp + 1) <=", "= int(input()) exp = 0 while 2 ** (exp + 1) <= n:", "sys.setrecursionlimit(10 ** 7) n = int(input()) exp = 0 while 2 ** (exp", "while 2 ** (exp + 1) <= n: exp += 1 print(2 **", "= 0 while 2 ** (exp + 1) <= n: exp += 1", "input = sys.stdin.readline sys.setrecursionlimit(10 ** 7) n = int(input()) exp = 0 while", "7) n = int(input()) exp = 0 while 2 ** (exp + 1)", "0 while 2 ** (exp + 1) <= n: exp += 1 print(2" ]
[ "os.environ['DJANGO_SETTINGS_MODULE'] = 'orm.settings' from orm.models import * def lock_application(func): def lock_and_call(application_name, *args, **kwargs):", "* def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try: func(application_name, *args,", "def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try: func(application_name, *args, **kwargs) finally: LocalApplicationLocks.unlock(lock)", "def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try: func(application_name, *args, **kwargs)", "orm.models import * def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try:", "import * def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try: func(application_name,", "os os.environ['DJANGO_SETTINGS_MODULE'] = 'orm.settings' from orm.models import * def lock_application(func): def lock_and_call(application_name, *args,", "'orm.settings' from orm.models import * def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock =", "<gh_stars>10-100 import os os.environ['DJANGO_SETTINGS_MODULE'] = 'orm.settings' from orm.models import * def lock_application(func): def", "lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try: func(application_name, *args, **kwargs) finally: LocalApplicationLocks.unlock(lock) return", "= 'orm.settings' from orm.models import * def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock", "import os os.environ['DJANGO_SETTINGS_MODULE'] = 'orm.settings' from orm.models import * def lock_application(func): def lock_and_call(application_name,", "*args, **kwargs): lock = 
LocalApplicationLocks.lock(application_name) try: func(application_name, *args, **kwargs) finally: LocalApplicationLocks.unlock(lock) return lock_and_call", "from orm.models import * def lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name)", "lock_application(func): def lock_and_call(application_name, *args, **kwargs): lock = LocalApplicationLocks.lock(application_name) try: func(application_name, *args, **kwargs) finally:" ]
[ "Middleware shouldn't touch responses outside of its mimetypes\" cors = CORSMiddleware() request =", "amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside", "= HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS", "request = HttpRequest() request.path = \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'],", ")), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )),", "HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com')", "outside of its mimetypes\" cors = CORSMiddleware() request = HttpRequest() request.path = \"/cicero\"", "sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch responses", "'*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path = \"/bar/baaz/quux\" cors.process_response(request, response)", "HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware()", "touch responses outside of its 
mimetypes\" cors = CORSMiddleware() request = HttpRequest() request.path", "HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't touch responses", "HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request,", "HttpResponse from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request", "), (('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware() request = HttpRequest() request.path =", "import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request = HttpRequest() request.path", ") cors = CORSMiddleware() request = HttpRequest() request.path = \"/test\" response = HttpResponse('[\"foo\"]',", "= CORSMiddleware() request = HttpRequest() request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request,", "), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors =", "cors = CORSMiddleware() request = HttpRequest() request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json')", "settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin',", "dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch", "import settings from django.test.testcases import TestCase from django.http import HttpRequest, HttpResponse from sugar.middleware.cors", "HttpRequest() 
request.path = \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def", "import TestCase from django.http import HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase):", "\"/cicero\" response = HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def", "= \"/cicero\" response = HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin'))", "test_middleware(self): cors = CORSMiddleware() request = HttpRequest() request.path = \"/\" response = HttpResponse('[\"foo\"]',", "from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request =", "test_non_interference(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\" cors = CORSMiddleware()", "response = HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self):", "('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json',", "request = HttpRequest() request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'],", "cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside of its", "('/bar', 
('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), )", "TestCase from django.http import HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def", "= HttpRequest() request.path = \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*')", "its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json',", "= \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\"", "= \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS", "responses outside of its mimetypes\" cors = CORSMiddleware() request = HttpRequest() request.path =", "cors = CORSMiddleware() request = HttpRequest() request.path = \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json')", "response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\"", "HttpRequest() request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path", "of its mimetypes\" cors = CORSMiddleware() request = HttpRequest() request.path = \"/cicero\" response", "CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request = HttpRequest() request.path = \"/\" response", 
"django.http import HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors", "CORSMiddleware() request = HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem ipsum dolor sit", "of its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar',", ")), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware() request =", "response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't", "<filename>sugar/tests/cors.py from django.conf import settings from django.test.testcases import TestCase from django.http import HttpRequest,", "response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path = \"/bar/baaz/quux\"", "ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't", "cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path =", "mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path", 
"(('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin',", "), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ),", "import HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors =", "class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request = HttpRequest() request.path = \"/\"", "= CORSMiddleware() request = HttpRequest() request.path = \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request,", "= HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't touch", "self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\" settings.CORS_PATHS", "Middleware shouldn't touch responses outside of its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json',", "mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ),", "\"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware", "response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't touch responses outside of its", "from django.http import HttpRequest, HttpResponse from 
sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self):", "settings from django.test.testcases import TestCase from django.http import HttpRequest, HttpResponse from sugar.middleware.cors import", "request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path =", "= HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'],", "= HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html')", "HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware", "= \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path = \"/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'example.com')", "touch responses outside of its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin',", "def test_non_interference(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\" cors =", "('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors", "('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/',", 
"(('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware() request = HttpRequest() request.path = \"/test\"", "request.path = \"/\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self):", "cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't touch responses outside of", "mimetype='text/html') cors.process_response(request, response) self.assertFalse(response.has_header('access-control-allow-origin')) def test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside of", "\"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request,", "('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware() request = HttpRequest()", "shouldn't touch responses outside of its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ),", "cors = CORSMiddleware() request = HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem ipsum", "= CORSMiddleware() request = HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem ipsum dolor", "'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'),", "self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path = \"/bar/baaz/quux\" cors.process_response(request,", "CORSMiddleware() request = HttpRequest() 
request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response)", "shouldn't touch responses outside of its mimetypes\" cors = CORSMiddleware() request = HttpRequest()", "\"CORS Middleware shouldn't touch responses outside of its mimetypes\" settings.CORS_PATHS = ( ('/foo',", "sugar.middleware.cors import CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request = HttpRequest()", "self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\"", "( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )),", "'*'), )), ) cors = CORSMiddleware() request = HttpRequest() request.path = \"/test\" response", "= ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )), ('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'),", "responses outside of its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'),", "= HttpRequest() request.path = \"/test\" response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*')", "def test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\" settings.CORS_PATHS =", "'*') def test_non_interference(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\" cors", "'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware() request", "request.path = \"/cicero\" response = HttpResponse('Lorem ipsum dolor sit amet', mimetype='text/html') cors.process_response(request, 
response)", "response = HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response)", ")), ) cors = CORSMiddleware() request = HttpRequest() request.path = \"/test\" response =", "request = HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem ipsum dolor sit amet',", "django.test.testcases import TestCase from django.http import HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware class", "mimetype='application/json') cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], '*') def test_non_interference(self): \"CORS Middleware shouldn't touch responses outside", "CORSMiddleware class CORSTests(TestCase): def test_middleware(self): cors = CORSMiddleware() request = HttpRequest() request.path =", "(('Access-Control-Allow-Origin', 'example.com'), )), ('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware()", "django.conf import settings from django.test.testcases import TestCase from django.http import HttpRequest, HttpResponse from", "its mimetypes\" cors = CORSMiddleware() request = HttpRequest() request.path = \"/cicero\" response =", "mimetypes\" cors = CORSMiddleware() request = HttpRequest() request.path = \"/cicero\" response = HttpResponse('Lorem", "('application/json', ), (('Access-Control-Allow-Origin', '*'), )), ) cors = CORSMiddleware() request = HttpRequest() request.path", "request.path = \"/foo/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'], 'foo.example.com') request.path = \"/bar/baaz/quux\" cors.process_response(request, response) self.assertEqual(response['access-control-allow-origin'],", "CORSMiddleware() request = HttpRequest() request.path = \"/\" response = 
HttpResponse('[\"foo\"]', mimetype='application/json') cors.process_response(request, response)", "from django.test.testcases import TestCase from django.http import HttpRequest, HttpResponse from sugar.middleware.cors import CORSMiddleware", "def test_middleware(self): cors = CORSMiddleware() request = HttpRequest() request.path = \"/\" response =", "test_custom_settings(self): \"CORS Middleware shouldn't touch responses outside of its mimetypes\" settings.CORS_PATHS = (", "\"CORS Middleware shouldn't touch responses outside of its mimetypes\" cors = CORSMiddleware() request", "from django.conf import settings from django.test.testcases import TestCase from django.http import HttpRequest, HttpResponse", "outside of its mimetypes\" settings.CORS_PATHS = ( ('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), ))," ]
[ "- z.mean(axis=0)[None, :]) # Build PCA results as xarray Dataset and save to", "the # SalishSeaCast surface nitrate and temperature records. # # required for the", "xr import sys from datetime import datetime, timedelta from scipy import signal, fft", "= landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)] # Calculate", "axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build flattened, subsampled", "# <NAME> and <NAME>: Wind-driven upwelling and # surface nutrient delivery in a", "= slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var:", "Analysis of Data Matrices. Holt, Rinehart and Winston. New York, USA. Chapter 18:", "= E.dot(R) # Sort rotated matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF", "[range(dim) for dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for", "'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc') if __name__ ==", "Sci., 2022. # # $ cd scripts # $ python3 PCA.py /path/to/files import", "- Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 437-438 The algorithm described in", "A_prime.dot(np.diag(sqrtL)) A2 = A * A var = A2.sum(axis=0) / A2.sum() E =", "xflat, valuesflat): valuesgridded[y, x, :] = row return valuesgridded def varimax(A, maxiter=40, tol=1e-5):", "build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component analysis and varimax rotation functions and", "valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation matrix H from the", "pp. 428-429 - Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 
437-438 The algorithm", "'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y',", "has been reformulated to use SVD based on equivalent definitions for the rotation", "rotation functions and build them to netCDF output \"\"\" # Load aggregated results", "landmask = ds.landmask.values # Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'],", "18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429 - Section 18.7: Mathematical", "with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values for var in", "in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for var in ('x', 'y')} coords['time']", "according to the corresponding xflat, yflat coordinate arrays. This function assumes a 2D", "# required for the analyses presented in: # # <NAME> and <NAME>: Wind-driven", "for var in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for var in ('x',", "for var in np.meshgrid(x, y)] # Calculate EOFs for var in ['temperature', 'nitrate']:", "- Section 18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429 - Section", "tol: break return H def calc_PCA(z): \"\"\"Calculate EOF matrices of n x p", "Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d", "# # required for the analyses presented in: # # <NAME> and <NAME>:", "('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal indices waterpoints", "and varimax rotation functions and build them to netCDF output \"\"\" # Load", "definitions for the rotation matrix described in Section 18.7. 
The eigenvalue matrix is", "flattened, subsampled coordinate arrays landmask = landmask[slc, slc] y, x = [range(dim) for", "H from the n x p PC loadings matrix A. H is determined", "U = E.dot(R) # Sort rotated matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible", "flattened array with the landpoints removed according to the corresponding xflat, yflat coordinate", "tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass filter and extract productive season z", "valuesgridded[y, x, :] = row return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the", "them to netCDF output \"\"\" # Load aggregated results file slc = slice(None,", "python # # Code module for calculating the PCA matrices of the #", "'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal indices waterpoints =", "for var in ['temperature', 'nitrate']: # Subsample and flatten raw = tools.flatten(data[var][:, slc,", "# Sort rotated matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA", "isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build flattened, subsampled coordinate", "# Subtract lowpass filter and extract productive season z = np.vstack([col - tools.lowpass(col,", "= np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d =", "rotation matrix H from the n x p PC loadings matrix A. H", "netCDF output \"\"\" # Load aggregated results file slc = slice(None, None, subsample)", "to those found in Sci-kit learn, Matlab, R, and presumably others. 
\"\"\" #", "= coords['time'][iseason] # Build flattened, subsampled coordinate arrays landmask = landmask[slc, slc] y,", "Dataset and save to netCDF variables = { 'landmask': (['y', 'x'], landmask), 'median':", "('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc') if", "= (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x, row in zip(yflat,", "\"\"\" # Load aggregated results file slc = slice(None, None, subsample) with xr.open_dataset(results_path", "varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation matrix H from the n x", "arrays landmask = landmask[slc, slc] y, x = [range(dim) for dim in landmask.shape]", "PCA.py /path/to/files import numpy as np import xarray as xr import sys from", "var = A2.sum(axis=0) / A2.sum() E = E_T.T # Get varimax rotation matrix", "Calculate EOFs for var in ['temperature', 'nitrate']: # Subsample and flatten raw =", "= landmask[slc, slc] y, x = [range(dim) for dim in landmask.shape] maskflat =", "\"\"\"Regrid a flattened array with the landpoints removed according to the corresponding xflat,", "= A_prime.dot(np.diag(sqrtL)) A2 = A * A var = A2.sum(axis=0) / A2.sum() E", "the corresponding xflat, yflat coordinate arrays. This function assumes a 2D input shape", "'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the", "Build PCA results as xarray Dataset and save to netCDF variables = {", "PCA results as xarray Dataset and save to netCDF variables = { 'landmask':", "upwelling and # surface nutrient delivery in a semi-enclosed coastal sea, # Ocean", "n x p PC loadings matrix A. 
H is determined iteratively from the", "landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B':", "valuesflat): \"\"\"Regrid a flattened array with the landpoints removed according to the corresponding", "maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)] #", "PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc') if __name__ == \"__main__\": build_PCA_files(sys.argv[1])", "and temperature records. # # required for the analyses presented in: # #", "coords['time'][iseason] # Build flattened, subsampled coordinate arrays landmask = landmask[slc, slc] y, x", "col in raw.T]).T[iseason, :] # Subtract mean and calculate PCA PCA = calc_PCA(z", "R = varimax(A) # Rotate matrices B = A.dot(R) B2 = B *", "and presumably others. \"\"\" # Initialization n, p = A.shape H = np.eye(p)", "for dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var", "and extract productive season z = np.vstack([col - tools.lowpass(col, cutoff) for col in", "B var_rot = B2.sum(axis=0) / B2.sum() U = E.dot(R) # Sort rotated matrices", "Equations 18.4.1-10, pp. 428-429 - Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 437-438", "from: <NAME>. (1965) Factor Analysis of Data Matrices. Holt, Rinehart and Winston. New", "equivalent definitions for the rotation matrix described in Section 18.7. The eigenvalue matrix", "criterion. Adapted from: <NAME>. (1965) Factor Analysis of Data Matrices. 
Holt, Rinehart and", "# Calculate orthogonal PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A =", "in np.meshgrid(x, y)] # Calculate EOFs for var in ['temperature', 'nitrate']: # Subsample", "z = np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :] # Subtract", "x = [range(dim) for dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat =", "calc_PCA(z): \"\"\"Calculate EOF matrices of n x p data matrix z using SVD", "timedelta from scipy import signal, fft from tqdm import tqdm import tools def", "var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component analysis and", "scipy import signal, fft from tqdm import tqdm import tools def regrid(xflat, yflat,", "data matrix z using SVD and optional varimax rotation \"\"\" # Calculate orthogonal", "the varimax criterion. Adapted from: <NAME>. (1965) Factor Analysis of Data Matrices. Holt,", "PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 =", "z using SVD and optional varimax rotation \"\"\" # Calculate orthogonal PCA matrices", "analyses presented in: # # <NAME> and <NAME>: Wind-driven upwelling and # surface", "the n x p PC loadings matrix A. H is determined iteratively from", "437-438 The algorithm described in Section 18.4 has been reformulated to use SVD", "This version of the varimax algorithm is functionally identical to those found in", "Get varimax rotation matrix R = varimax(A) # Rotate matrices B = A.dot(R)", "slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values", "{ 'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A':", "Matlab, R, and presumably others. 
\"\"\" # Initialization n, p = A.shape H", "file slc = slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data", "is used to evaluate convergence. This version of the varimax algorithm is functionally", "return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation matrix H from", "0 and d/d_old < 1 + tol: break return H def calc_PCA(z): \"\"\"Calculate", "analysis and varimax rotation functions and build them to netCDF output \"\"\" #", "2D input shape for valuesflat of [space, mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1,", "x p data matrix z using SVD and optional varimax rotation \"\"\" #", "* B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P, Delta, Q_T =", "x p PC loadings matrix A. H is determined iteratively from the Lagrange", "functions and build them to netCDF output \"\"\" # Load aggregated results file", "'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time',", "import numpy as np import xarray as xr import sys from datetime import", "B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235):", "xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)] # Calculate EOFs for", "# # Code module for calculating the PCA matrices of the # SalishSeaCast", "records. # # required for the analyses presented in: # # <NAME> and", "delivery in a semi-enclosed coastal sea, # Ocean Sci., 2022. # # $", "tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] =", "been reformulated to use SVD based on equivalent definitions for the rotation matrix", "is determined iteratively from the Lagrange multiplier optimization of the varimax criterion. 
Adapted", "'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat,", "PCA = {'A': A, 'E': E, 'var': var, 'B': B[:, isort], 'U': U[:,", "= np.eye(p) d = 0 # Iteration for i in tqdm(range(maxiter), desc='Calculating rotation", "'var': var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA def", "A2 = A * A var = A2.sum(axis=0) / A2.sum() E = E_T.T", "calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA results as", "xflat, yflat coordinate arrays. This function assumes a 2D input shape for valuesflat", "var in ['temperature', 'nitrate']: # Subsample and flatten raw = tools.flatten(data[var][:, slc, slc],", "'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time',", "= 0 # Iteration for i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old =", "EOFs for var in ['temperature', 'nitrate']: # Subsample and flatten raw = tools.flatten(data[var][:,", "slc = slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data =", "matrix'): d_old = d B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B", "H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence if d_old", "tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :] # Subtract mean and calculate PCA", "= A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B * B * B -", "\"\"\"Call the principal component analysis and varimax rotation functions and build them to", "principal component analysis and varimax rotation functions and build them to netCDF output", "Build flattened, subsampled coordinate arrays landmask = landmask[slc, slc] y, x = [range(dim)", "var in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for var in ('x', 'y')}", "coords['time'] = 
tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask)", "tqdm import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array with the", "var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal", "PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode',", "if d_old != 0 and d/d_old < 1 + tol: break return H", "tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build flattened, subsampled coordinate arrays landmask =", "SVD based on equivalent definitions for the rotation matrix described in Section 18.7.", "'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var':", "2022. # # $ cd scripts # $ python3 PCA.py /path/to/files import numpy", "for the analyses presented in: # # <NAME> and <NAME>: Wind-driven upwelling and", "in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d B = A.dot(H) # --------------------------------------------------------", "d B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B * B *", "= tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate", "and calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA results", "import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array with the landpoints", "A * A var = A2.sum(axis=0) / A2.sum() E = E_T.T # Get", "'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']),", "18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 
437-438 The algorithm described in Section 18.4", "import datetime, timedelta from scipy import signal, fft from tqdm import tqdm import", "nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason]", "18.4.1-10, pp. 428-429 - Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 437-438 The", "algorithm is functionally identical to those found in Sci-kit learn, Matlab, R, and", "B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta))", "B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B * B * B", "$ python3 PCA.py /path/to/files import numpy as np import xarray as xr import", "d_old = d B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B *", "= varimax(A) # Rotate matrices B = A.dot(R) B2 = B * B", "Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason", "use SVD based on equivalent definitions for the rotation matrix described in Section", "Data Matrices. Holt, Rinehart and Winston. New York, USA. Chapter 18: Analytical Rotations", "loadings matrix A. H is determined iteratively from the Lagrange multiplier optimization of", "for the rotation matrix described in Section 18.7. The eigenvalue matrix is used", "productive season z = np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :]", "/ B2.sum() U = E.dot(R) # Sort rotated matrices isort = var_rot.argsort()[::-1] #", "temperature records. 
# # required for the analyses presented in: # # <NAME>", "flatten raw = tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass filter and extract", "presented in: # # <NAME> and <NAME>: Wind-driven upwelling and # surface nutrient", "# Build PCA results as xarray Dataset and save to netCDF variables =", "1 + tol: break return H def calc_PCA(z): \"\"\"Calculate EOF matrices of n", "H = np.eye(p) d = 0 # Iteration for i in tqdm(range(maxiter), desc='Calculating", "slc, slc], landmask) # Subtract lowpass filter and extract productive season z =", "iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build flattened, subsampled coordinate arrays", "yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var", "function assumes a 2D input shape for valuesflat of [space, mode]. \"\"\" shape", "18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T) #", "shape for valuesflat of [space, mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded", "convergence. This version of the varimax algorithm is functionally identical to those found", "zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] = row return valuesgridded def varimax(A, maxiter=40,", "R, and presumably others. \"\"\" # Initialization n, p = A.shape H =", "removed according to the corresponding xflat, yflat coordinate arrays. This function assumes a", "coords['time'] = coords['time'][iseason] # Build flattened, subsampled coordinate arrays landmask = landmask[slc, slc]", "for var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate", "Sci-kit learn, Matlab, R, and presumably others. 
\"\"\" # Initialization n, p =", "= B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6", "E = E_T.T # Get varimax rotation matrix R = varimax(A) # Rotate", "version of the varimax algorithm is functionally identical to those found in Sci-kit", "for y, x, row in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] = row", ":] = row return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation", "Section 18.4 has been reformulated to use SVD based on equivalent definitions for", "428-429 - Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 437-438 The algorithm described", "# $ python3 PCA.py /path/to/files import numpy as np import xarray as xr", "to use SVD based on equivalent definitions for the rotation matrix described in", "dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var in", "Mathematical Proofs, Equations 18.7.32-54, pp. 437-438 The algorithm described in Section 18.4 has", ":]) # Build PCA results as xarray Dataset and save to netCDF variables", "# ------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence if d_old != 0 and", "subsample=5, cutoff=1235): \"\"\"Call the principal component analysis and varimax rotation functions and build", "import tqdm import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array with", "Adapted from: <NAME>. (1965) Factor Analysis of Data Matrices. Holt, Rinehart and Winston.", "- tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :] # Subtract mean and calculate", "= d B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B * B", "as np import xarray as xr import sys from datetime import datetime, timedelta", "Ocean Sci., 2022. 
# # $ cd scripts # $ python3 PCA.py /path/to/files", "tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array with the landpoints removed", "matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA = {'A': A,", "# Ocean Sci., 2022. # # $ cd scripts # $ python3 PCA.py", "valuesflat of [space, mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape)", "subsampled coordinate arrays landmask = landmask[slc, slc] y, x = [range(dim) for dim", "nutrient delivery in a semi-enclosed coastal sea, # Ocean Sci., 2022. # #", "\"\"\"Calculate EOF matrices of n x p data matrix z using SVD and", "PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc') if __name__", "/ A2.sum() E = E_T.T # Get varimax rotation matrix R = varimax(A)", "xarray-compatible netCDF dict PCA = {'A': A, 'E': E, 'var': var, 'B': B[:,", "d/d_old < 1 + tol: break return H def calc_PCA(z): \"\"\"Calculate EOF matrices", "matrix described in Section 18.7. The eigenvalue matrix is used to evaluate convergence.", "z.mean(axis=0)[None, :]) # Build PCA results as xarray Dataset and save to netCDF", "< 1 + tol: break return H def calc_PCA(z): \"\"\"Calculate EOF matrices of", "landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)]", "p = A.shape H = np.eye(p) d = 0 # Iteration for i", "slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y',", "valuesflat): valuesgridded[y, x, :] = row return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate", "York, USA. 
Chapter 18: Analytical Rotations - Section 18.4: Simultaneous Factor Varimax Solution,", "('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc') if __name__ == \"__main__\":", "data = {var: ds[var].values for var in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc]", "yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)] # Calculate EOFs for var", "import sys from datetime import datetime, timedelta from scipy import signal, fft from", "sum(Delta) # Convergence if d_old != 0 and d/d_old < 1 + tol:", "Sort rotated matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA =", "from datetime import datetime, timedelta from scipy import signal, fft from tqdm import", "in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] = row return valuesgridded def varimax(A,", "cutoff=1235): \"\"\"Call the principal component analysis and varimax rotation functions and build them", "from the n x p PC loadings matrix A. H is determined iteratively", "raw = tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass filter and extract productive", "# ------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta) #", "PC loadings matrix A. H is determined iteratively from the Lagrange multiplier optimization", "iteratively from the Lagrange multiplier optimization of the varimax criterion. Adapted from: <NAME>.", "SalishSeaCast surface nitrate and temperature records. # # required for the analyses presented", "# Convergence if d_old != 0 and d/d_old < 1 + tol: break", "matrices of n x p data matrix z using SVD and optional varimax", "coords = {var: ds[var].values[slc] for var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask", "eigenvalue matrix is used to evaluate convergence. 
This version of the varimax algorithm", "(['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'],", "PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA results as xarray", "USA. Chapter 18: Analytical Rotations - Section 18.4: Simultaneous Factor Varimax Solution, Equations", "xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values for var in ('temperature',", "ds[var].values[slc] for var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values #", "(['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])),", "Code module for calculating the PCA matrices of the # SalishSeaCast surface nitrate", "'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode',", "Subsample and flatten raw = tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass filter", "described in Section 18.4 has been reformulated to use SVD based on equivalent", "nitrate) coords['time'] = coords['time'][iseason] # Build flattened, subsampled coordinate arrays landmask = landmask[slc,", "Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429 - Section 18.7: Mathematical Proofs, Equations", "# # $ cd scripts # $ python3 PCA.py /path/to/files import numpy as", "# $ cd scripts # $ python3 PCA.py /path/to/files import numpy as np", "A = A_prime.dot(np.diag(sqrtL)) A2 = A * A var = A2.sum(axis=0) / A2.sum()", "extract productive season z = np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason,", "Rotate matrices B = A.dot(R) B2 = B * B var_rot = B2.sum(axis=0)", "based on equivalent definitions for the rotation matrix described in Section 18.7. 
The", "landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)] # Calculate EOFs", "#!/usr/bin/env python # # Code module for calculating the PCA matrices of the", "from tqdm import tqdm import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened", "isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA = {'A': A, 'E':", "------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence if d_old != 0 and d/d_old", "the PCA matrices of the # SalishSeaCast surface nitrate and temperature records. #", "output \"\"\" # Load aggregated results file slc = slice(None, None, subsample) with", "as ds: data = {var: ds[var].values for var in ('temperature', 'nitrate')} coords =", "Chapter 18: Analytical Rotations - Section 18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10,", "for i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d B = A.dot(H)", "E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A * A var", "'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']),", "landpoints removed according to the corresponding xflat, yflat coordinate arrays. This function assumes", "# SalishSeaCast surface nitrate and temperature records. # # required for the analyses", "p PC loadings matrix A. H is determined iteratively from the Lagrange multiplier", "'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5,", "Proofs, Equations 18.7.32-54, pp. 437-438 The algorithm described in Section 18.4 has been", "valuesgridded = np.zeros(shape) for y, x, row in zip(yflat, xflat, valuesflat): valuesgridded[y, x,", "<NAME>. (1965) Factor Analysis of Data Matrices. Holt, Rinehart and Winston. 
New York,", "np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build", "{var: ds[var].values[slc] for var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values", "Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429 - Section 18.7: Mathematical Proofs,", "varimax rotation functions and build them to netCDF output \"\"\" # Load aggregated", "= tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass filter and extract productive season", "in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal indices", "rotation matrix R = varimax(A) # Rotate matrices B = A.dot(R) B2 =", "matrices of the # SalishSeaCast surface nitrate and temperature records. # # required", "= A * A var = A2.sum(axis=0) / A2.sum() E = E_T.T #", "E_T.T # Get varimax rotation matrix R = varimax(A) # Rotate matrices B", "netCDF dict PCA = {'A': A, 'E': E, 'var': var, 'B': B[:, isort],", "surface nitrate and temperature records. # # required for the analyses presented in:", "python3 PCA.py /path/to/files import numpy as np import xarray as xr import sys", "the principal component analysis and varimax rotation functions and build them to netCDF", "= B * B var_rot = B2.sum(axis=0) / B2.sum() U = E.dot(R) #", "y, x, row in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] = row return", "n x p data matrix z using SVD and optional varimax rotation \"\"\"", "d = 0 # Iteration for i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old", "A2.sum(axis=0) / A2.sum() E = E_T.T # Get varimax rotation matrix R =", "'nitrate')} coords = {var: ds[var].values[slc] for var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values)", "y)] # Calculate EOFs for var in ['temperature', 'nitrate']: # Subsample and flatten", "arrays. 
This function assumes a 2D input shape for valuesflat of [space, mode].", "# Return xarray-compatible netCDF dict PCA = {'A': A, 'E': E, 'var': var,", "n, p = A.shape H = np.eye(p) d = 0 # Iteration for", "calculating the PCA matrices of the # SalishSeaCast surface nitrate and temperature records.", "0 # Iteration for i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d", "results file slc = slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds:", "'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat,", "= B2.sum(axis=0) / B2.sum() U = E.dot(R) # Sort rotated matrices isort =", "ds: data = {var: ds[var].values for var in ('temperature', 'nitrate')} coords = {var:", "matrices B = A.dot(R) B2 = B * B var_rot = B2.sum(axis=0) /", "Return xarray-compatible netCDF dict PCA = {'A': A, 'E': E, 'var': var, 'B':", "surface nutrient delivery in a semi-enclosed coastal sea, # Ocean Sci., 2022. #", "18.7. The eigenvalue matrix is used to evaluate convergence. This version of the", "<NAME> and <NAME>: Wind-driven upwelling and # surface nutrient delivery in a semi-enclosed", "Holt, Rinehart and Winston. New York, USA. Chapter 18: Analytical Rotations - Section", "with the landpoints removed according to the corresponding xflat, yflat coordinate arrays. This", "B * B var_rot = B2.sum(axis=0) / B2.sum() U = E.dot(R) # Sort", "Load aggregated results file slc = slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc')", "\"\"\"Calculate the varimax rotation matrix H from the n x p PC loadings", "identical to those found in Sci-kit learn, Matlab, R, and presumably others. \"\"\"", "U[:, isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal", "a 2D input shape for valuesflat of [space, mode]. 
\"\"\" shape = (max(yflat)+1,", "learn, Matlab, R, and presumably others. \"\"\" # Initialization n, p = A.shape", "* B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P, Delta,", "!= 0 and d/d_old < 1 + tol: break return H def calc_PCA(z):", "results as xarray Dataset and save to netCDF variables = { 'landmask': (['y',", "varimax(A) # Rotate matrices B = A.dot(R) B2 = B * B var_rot", "Iteration for i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d B =", "full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A * A var = A2.sum(axis=0) /", "None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values for", "Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45", "<filename>scripts/PCA.py #!/usr/bin/env python # # Code module for calculating the PCA matrices of", "in ['temperature', 'nitrate']: # Subsample and flatten raw = tools.flatten(data[var][:, slc, slc], landmask)", "np.eye(p) d = 0 # Iteration for i in tqdm(range(maxiter), desc='Calculating rotation matrix'):", "# Code module for calculating the PCA matrices of the # SalishSeaCast surface", "varimax rotation \"\"\" # Calculate orthogonal PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z,", "the landpoints removed according to the corresponding xflat, yflat coordinate arrays. This function", "isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component", "18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence if", "coordinate arrays. This function assumes a 2D input shape for valuesflat of [space,", "in Section 18.4 has been reformulated to use SVD based on equivalent definitions", "corresponding xflat, yflat coordinate arrays. 
This function assumes a 2D input shape for", "'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc')", "fft from tqdm import tqdm import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a", "# Rotate matrices B = A.dot(R) B2 = B * B var_rot =", "<NAME>: Wind-driven upwelling and # surface nutrient delivery in a semi-enclosed coastal sea,", "# Subtract mean and calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :]) #", "waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build flattened,", "in raw.T]).T[iseason, :] # Subtract mean and calculate PCA PCA = calc_PCA(z -", "* A var = A2.sum(axis=0) / A2.sum() E = E_T.T # Get varimax", "rotation matrix described in Section 18.7. The eigenvalue matrix is used to evaluate", "subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values for var", "# Initialization n, p = A.shape H = np.eye(p) d = 0 #", "SVD and optional varimax rotation \"\"\" # Calculate orthogonal PCA matrices A_prime, sqrtL,", "xarray Dataset and save to netCDF variables = { 'landmask': (['y', 'x'], landmask),", "mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x,", "sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A * A", "rotated matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA = {'A':", "multiplier optimization of the varimax criterion. Adapted from: <NAME>. (1965) Factor Analysis of", "varimax rotation matrix R = varimax(A) # Rotate matrices B = A.dot(R) B2", "[space, mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y,", "Matrices. Holt, Rinehart and Winston. New York, USA. 
Chapter 18: Analytical Rotations -", "PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path + var +", "using SVD and optional varimax rotation \"\"\" # Calculate orthogonal PCA matrices A_prime,", "as xarray Dataset and save to netCDF variables = { 'landmask': (['y', 'x'],", "tools.formattime(ds.time.values) landmask = ds.landmask.values # Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate =", "PCA matrices of the # SalishSeaCast surface nitrate and temperature records. # #", "in Sci-kit learn, Matlab, R, and presumably others. \"\"\" # Initialization n, p", "component analysis and varimax rotation functions and build them to netCDF output \"\"\"", "= { 'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)),", "of [space, mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for", "Subtract mean and calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :]) # Build", "scripts # $ python3 PCA.py /path/to/files import numpy as np import xarray as", "def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation matrix H from the n", "yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot':", "a flattened array with the landpoints removed according to the corresponding xflat, yflat", "= A.dot(R) B2 = B * B var_rot = B2.sum(axis=0) / B2.sum() U", "Rinehart and Winston. New York, USA. Chapter 18: Analytical Rotations - Section 18.4:", "used to evaluate convergence. 
This version of the varimax algorithm is functionally identical", "indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'],", "{var: ds[var].values for var in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for var", "18.7.45 d = sum(Delta) # Convergence if d_old != 0 and d/d_old <", "'nitrate']: # Subsample and flatten raw = tools.flatten(data[var][:, slc, slc], landmask) # Subtract", "---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T)", "= ds.landmask.values # Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints),", "= {'A': A, 'E': E, 'var': var, 'B': B[:, isort], 'U': U[:, isort],", "matrix is used to evaluate convergence. This version of the varimax algorithm is", "others. \"\"\" # Initialization n, p = A.shape H = np.eye(p) d =", "described in Section 18.7. The eigenvalue matrix is used to evaluate convergence. This", "18.4.5 beta = B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n #", "and Winston. New York, USA. Chapter 18: Analytical Rotations - Section 18.4: Simultaneous", "matrix R = varimax(A) # Rotate matrices B = A.dot(R) B2 = B", "break return H def calc_PCA(z): \"\"\"Calculate EOF matrices of n x p data", "New York, USA. Chapter 18: Analytical Rotations - Section 18.4: Simultaneous Factor Varimax", "in Section 18.7. The eigenvalue matrix is used to evaluate convergence. This version", "Section 18.7. The eigenvalue matrix is used to evaluate convergence. 
This version of", "landmask) # Subtract lowpass filter and extract productive season z = np.vstack([col -", "tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d B = A.dot(H) # -------------------------------------------------------- 18.4.5", "EOF matrices of n x p data matrix z using SVD and optional", "/path/to/files import numpy as np import xarray as xr import sys from datetime", "cutoff) for col in raw.T]).T[iseason, :] # Subtract mean and calculate PCA PCA", "x, row in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] = row return valuesgridded", "------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence", "['temperature', 'nitrate']: # Subsample and flatten raw = tools.flatten(data[var][:, slc, slc], landmask) #", "and save to netCDF variables = { 'landmask': (['y', 'x'], landmask), 'median': (['y',", "sys from datetime import datetime, timedelta from scipy import signal, fft from tqdm", "B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P,", "input shape for valuesflat of [space, mode]. \"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1])", "B = A.dot(R) B2 = B * B var_rot = B2.sum(axis=0) / B2.sum()", "save to netCDF variables = { 'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'],", "H def calc_PCA(z): \"\"\"Calculate EOF matrices of n x p data matrix z", "p data matrix z using SVD and optional varimax rotation \"\"\" # Calculate", "module for calculating the PCA matrices of the # SalishSeaCast surface nitrate and", "This function assumes a 2D input shape for valuesflat of [space, mode]. 
\"\"\"", "'E': E, 'var': var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return", "B2 = B * B var_rot = B2.sum(axis=0) / B2.sum() U = E.dot(R)", "d_old != 0 and d/d_old < 1 + tol: break return H def", "numpy as np import xarray as xr import sys from datetime import datetime,", "netCDF variables = { 'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc,", "var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA = {'A': A, 'E': E, 'var':", "x, :] = row return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax", "B2.sum() U = E.dot(R) # Sort rotated matrices isort = var_rot.argsort()[::-1] # Return", "'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values for var in ('temperature', 'nitrate')} coords", "E.dot(R) # Sort rotated matrices isort = var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict", "Initialization n, p = A.shape H = np.eye(p) d = 0 # Iteration", "= row return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation matrix", "rotation \"\"\" # Calculate orthogonal PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False)", "(['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat,", "\"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x, row", "from scipy import signal, fft from tqdm import tqdm import tools def regrid(xflat,", "to the corresponding xflat, yflat coordinate arrays. 
This function assumes a 2D input", "row in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] = row return valuesgridded def", "build them to netCDF output \"\"\" # Load aggregated results file slc =", "A.shape H = np.eye(p) d = 0 # Iteration for i in tqdm(range(maxiter),", "Convergence if d_old != 0 and d/d_old < 1 + tol: break return", "P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence if d_old != 0", "tol=1e-5): \"\"\"Calculate the varimax rotation matrix H from the n x p PC", "nitrate and temperature records. # # required for the analyses presented in: #", "- B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) #", "varimax rotation matrix H from the n x p PC loadings matrix A.", "-------------------------------------------------------- 18.4.5 beta = B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n", "# Calculate EOFs for var in ['temperature', 'nitrate']: # Subsample and flatten raw", "to evaluate convergence. This version of the varimax algorithm is functionally identical to", "aggregated results file slc = slice(None, None, subsample) with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as", "slc] y, x = [range(dim) for dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat,", "var in np.meshgrid(x, y)] # Calculate EOFs for var in ['temperature', 'nitrate']: #", "Solution, Equations 18.4.1-10, pp. 428-429 - Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp.", "= P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta) # Convergence if d_old !=", "and optional varimax rotation \"\"\" # Calculate orthogonal PCA matrices A_prime, sqrtL, E_T", "for valuesflat of [space, mode]. 
\"\"\" shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded =", "max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x, row in zip(yflat, xflat, valuesflat):", "a semi-enclosed coastal sea, # Ocean Sci., 2022. # # $ cd scripts", "matrix H from the n x p PC loadings matrix A. H is", "Wind-driven upwelling and # surface nutrient delivery in a semi-enclosed coastal sea, #", "algorithm described in Section 18.4 has been reformulated to use SVD based on", "{'A': A, 'E': E, 'var': var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot':", "tqdm import tqdm import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array", "ds[var].values for var in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for var in", "and d/d_old < 1 + tol: break return H def calc_PCA(z): \"\"\"Calculate EOF", "determined iteratively from the Lagrange multiplier optimization of the varimax criterion. Adapted from:", "= A2.sum(axis=0) / A2.sum() E = E_T.T # Get varimax rotation matrix R", "import xarray as xr import sys from datetime import datetime, timedelta from scipy", "regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array with the landpoints removed according to", "and flatten raw = tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass filter and", "slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x',", "Analytical Rotations - Section 18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429", "# Get varimax rotation matrix R = varimax(A) # Rotate matrices B =", "A.dot(H) # -------------------------------------------------------- 18.4.5 beta = B * B * B - B.dot(np.diag(np.diag(B.T.dot(B))))", "slc], landmask) # Subtract lowpass filter and extract productive season z = np.vstack([col", "the varimax rotation matrix H from the n x p PC loadings matrix", "18.7.32-54, pp. 
437-438 The algorithm described in Section 18.4 has been reformulated to", "= [var.ravel()[maskflat] for var in np.meshgrid(x, y)] # Calculate EOFs for var in", "and <NAME>: Wind-driven upwelling and # surface nutrient delivery in a semi-enclosed coastal", "= tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time']", "# Subsample and flatten raw = tools.flatten(data[var][:, slc, slc], landmask) # Subtract lowpass", "datetime, timedelta from scipy import signal, fft from tqdm import tqdm import tools", "= np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] #", "coordinate arrays landmask = landmask[slc, slc] y, x = [range(dim) for dim in", "np.zeros(shape) for y, x, row in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :] =", "mean and calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA", "'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])),", "= {var: ds[var].values[slc] for var in ('x', 'y')} coords['time'] = tools.formattime(ds.time.values) landmask =", "* B var_rot = B2.sum(axis=0) / B2.sum() U = E.dot(R) # Sort rotated", "i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d B = A.dot(H) #", "required for the analyses presented in: # # <NAME> and <NAME>: Wind-driven upwelling", "np.meshgrid(x, y)] # Calculate EOFs for var in ['temperature', 'nitrate']: # Subsample and", "is functionally identical to those found in Sci-kit learn, Matlab, R, and presumably", "calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA results as xarray Dataset and save", "/ n # ---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42", "Rotations - Section 18.4: Simultaneous Factor Varimax 
Solution, Equations 18.4.1-10, pp. 428-429 -", "presumably others. \"\"\" # Initialization n, p = A.shape H = np.eye(p) d", "landmask[slc, slc] y, x = [range(dim) for dim in landmask.shape] maskflat = landmask.ravel().astype(bool)", "= np.zeros(shape) for y, x, row in zip(yflat, xflat, valuesflat): valuesgridded[y, x, :]", "H is determined iteratively from the Lagrange multiplier optimization of the varimax criterion.", "= [range(dim) for dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat]", "+ 'MooreMaleyAllenOS2022_modelfields.nc') as ds: data = {var: ds[var].values for var in ('temperature', 'nitrate')}", "regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']),", "B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P, Delta, Q_T", "the rotation matrix described in Section 18.7. The eigenvalue matrix is used to", "varimax criterion. Adapted from: <NAME>. (1965) Factor Analysis of Data Matrices. Holt, Rinehart", "The eigenvalue matrix is used to evaluate convergence. This version of the varimax", "'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']),", "cd scripts # $ python3 PCA.py /path/to/files import numpy as np import xarray", "[var.ravel()[maskflat] for var in np.meshgrid(x, y)] # Calculate EOFs for var in ['temperature',", "landmask = landmask[slc, slc] y, x = [range(dim) for dim in landmask.shape] maskflat", "Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 
437-438 The algorithm described in Section", "= {var: ds[var].values for var in ('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for", "# -------------------------------------------------------- 18.4.5 beta = B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) /", "y, x = [range(dim) for dim in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat", "B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------", "# ---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H =", "np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :] # Subtract mean and", "np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E':", "in landmask.shape] maskflat = landmask.ravel().astype(bool) xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x,", "Winston. New York, USA. Chapter 18: Analytical Rotations - Section 18.4: Simultaneous Factor", "array with the landpoints removed according to the corresponding xflat, yflat coordinate arrays.", "var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path,", "E, 'var': var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA", "# surface nutrient delivery in a semi-enclosed coastal sea, # Ocean Sci., 2022.", "= E_T.T # Get varimax rotation matrix R = varimax(A) # Rotate matrices", "and # surface nutrient delivery in a semi-enclosed coastal sea, # Ocean Sci.,", "optional varimax rotation \"\"\" # Calculate orthogonal PCA matrices A_prime, sqrtL, E_T =", "# Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments,", "\"\"\" # Initialization n, p = A.shape H = np.eye(p) d = 0", "maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation 
matrix H from the n x p", "+ tol: break return H def calc_PCA(z): \"\"\"Calculate EOF matrices of n x", "valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x, row in zip(yflat, xflat, valuesflat): valuesgridded[y,", "varimax algorithm is functionally identical to those found in Sci-kit learn, Matlab, R,", "row return valuesgridded def varimax(A, maxiter=40, tol=1e-5): \"\"\"Calculate the varimax rotation matrix H", "signal, fft from tqdm import tqdm import tools def regrid(xflat, yflat, valuesflat): \"\"\"Regrid", "Varimax Solution, Equations 18.4.1-10, pp. 428-429 - Section 18.7: Mathematical Proofs, Equations 18.7.32-54,", "Lagrange multiplier optimization of the varimax criterion. Adapted from: <NAME>. (1965) Factor Analysis", "for col in raw.T]).T[iseason, :] # Subtract mean and calculate PCA PCA =", "pp. 437-438 The algorithm described in Section 18.4 has been reformulated to use", "from the Lagrange multiplier optimization of the varimax criterion. Adapted from: <NAME>. (1965)", "functionally identical to those found in Sci-kit learn, Matlab, R, and presumably others.", "season z = np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :] #", "optimization of the varimax criterion. Adapted from: <NAME>. (1965) Factor Analysis of Data", "# Iteration for i in tqdm(range(maxiter), desc='Calculating rotation matrix'): d_old = d B", "sea, # Ocean Sci., 2022. # # $ cd scripts # $ python3", "yflat coordinate arrays. This function assumes a 2D input shape for valuesflat of", "shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x, row in", "P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------", "Subtract lowpass filter and extract productive season z = np.vstack([col - tools.lowpass(col, cutoff)", "those found in Sci-kit learn, Matlab, R, and presumably others. \"\"\" # Initialization", "matrix A. 
H is determined iteratively from the Lagrange multiplier optimization of the", "= var_rot.argsort()[::-1] # Return xarray-compatible netCDF dict PCA = {'A': A, 'E': E,", "seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason =", "'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path", "as xr import sys from datetime import datetime, timedelta from scipy import signal,", "of the varimax criterion. Adapted from: <NAME>. (1965) Factor Analysis of Data Matrices.", "orthogonal PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2", "matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A", "PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component analysis and varimax rotation", "'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables,", "and build them to netCDF output \"\"\" # Load aggregated results file slc", "(['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), }", "import signal, fft from tqdm import tqdm import tools def regrid(xflat, yflat, valuesflat):", "of n x p data matrix z using SVD and optional varimax rotation", ":] # Subtract mean and calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None, :])", "('temperature', 'nitrate')} coords = {var: ds[var].values[slc] for var in ('x', 'y')} coords['time'] =", "The algorithm described in Section 18.4 has been reformulated to use SVD based", "def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component analysis and varimax rotation functions", "evaluate convergence. 
This version of the varimax algorithm is functionally identical to those", "= np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A * A var =", "= tools.calc_seasonal_indices(coords['time'], nitrate) coords['time'] = coords['time'][iseason] # Build flattened, subsampled coordinate arrays landmask", "return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component analysis and varimax", "# Build flattened, subsampled coordinate arrays landmask = landmask[slc, slc] y, x =", "18.4 has been reformulated to use SVD based on equivalent definitions for the", "Calculate orthogonal PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL))", "of the # SalishSeaCast surface nitrate and temperature records. # # required for", "xarray as xr import sys from datetime import datetime, timedelta from scipy import", "datetime import datetime, timedelta from scipy import signal, fft from tqdm import tqdm", "(['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)), 'A': (['time', 'mode'],", "PCA = calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA results as xarray Dataset", "regrid(xflat, yflat, PCA['U'])), 'var': ('mode', PCA['var']), 'var_rot': ('mode', PCA['var_rot']), } xr.Dataset(variables, coords).to_netcdf(results_path +", "filter and extract productive season z = np.vstack([col - tools.lowpass(col, cutoff) for col", "the varimax algorithm is functionally identical to those found in Sci-kit learn, Matlab,", "raw.T]).T[iseason, :] # Subtract mean and calculate PCA PCA = calc_PCA(z - z.mean(axis=0)[None,", "coastal sea, # Ocean Sci., 2022. 
# # $ cd scripts # $", "lowpass filter and extract productive season z = np.vstack([col - tools.lowpass(col, cutoff) for", "$ cd scripts # $ python3 PCA.py /path/to/files import numpy as np import", "beta = B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ----------", "rotation matrix'): d_old = d B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta =", "waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1) isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate)", "of Data Matrices. Holt, Rinehart and Winston. New York, USA. Chapter 18: Analytical", "n # ---------- 18.4.6 P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H", "reformulated to use SVD based on equivalent definitions for the rotation matrix described", "Factor Analysis of Data Matrices. Holt, Rinehart and Winston. New York, USA. Chapter", "A2.sum() E = E_T.T # Get varimax rotation matrix R = varimax(A) #", "# Load aggregated results file slc = slice(None, None, subsample) with xr.open_dataset(results_path +", "d = sum(Delta) # Convergence if d_old != 0 and d/d_old < 1", "= sum(Delta) # Convergence if d_old != 0 and d/d_old < 1 +", "(['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x',", "B2.sum(axis=0) / B2.sum() U = E.dot(R) # Sort rotated matrices isort = var_rot.argsort()[::-1]", "np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42 H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45 d = sum(Delta)", "found in Sci-kit learn, Matlab, R, and presumably others. 
\"\"\" # Initialization n,", "of the varimax algorithm is functionally identical to those found in Sci-kit learn,", "yflat, valuesflat): \"\"\"Regrid a flattened array with the landpoints removed according to the", "var_rot = B2.sum(axis=0) / B2.sum() U = E.dot(R) # Sort rotated matrices isort", "to netCDF output \"\"\" # Load aggregated results file slc = slice(None, None,", "PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'], regrid(xflat,", "= A.shape H = np.eye(p) d = 0 # Iteration for i in", "variables = { 'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason, slc, slc],", "in a semi-enclosed coastal sea, # Ocean Sci., 2022. # # $ cd", "return H def calc_PCA(z): \"\"\"Calculate EOF matrices of n x p data matrix", "(1965) Factor Analysis of Data Matrices. Holt, Rinehart and Winston. New York, USA.", "the Lagrange multiplier optimization of the varimax criterion. Adapted from: <NAME>. (1965) Factor", "np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A * A var = A2.sum(axis=0)", "'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call the principal component analysis", "matrix z using SVD and optional varimax rotation \"\"\" # Calculate orthogonal PCA", "on equivalent definitions for the rotation matrix described in Section 18.7. The eigenvalue", "dict PCA = {'A': A, 'E': E, 'var': var, 'B': B[:, isort], 'U':", "np import xarray as xr import sys from datetime import datetime, timedelta from", "Equations 18.7.32-54, pp. 
437-438 The algorithm described in Section 18.4 has been reformulated", "= np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :] # Subtract mean", "= calc_PCA(z - z.mean(axis=0)[None, :]) # Build PCA results as xarray Dataset and", "# # <NAME> and <NAME>: Wind-driven upwelling and # surface nutrient delivery in", "Section 18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429 - Section 18.7:", "A var = A2.sum(axis=0) / A2.sum() E = E_T.T # Get varimax rotation", "A.dot(R) B2 = B * B var_rot = B2.sum(axis=0) / B2.sum() U =", "A, 'E': E, 'var': var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]}", "'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U': (['y', 'x', 'mode'],", "def regrid(xflat, yflat, valuesflat): \"\"\"Regrid a flattened array with the landpoints removed according", "isort], 'U': U[:, isort], 'var_rot': var_rot[isort]} return PCA def build_PCA_files(results_path, subsample=5, cutoff=1235): \"\"\"Call", "ds.landmask.values # Calculate seasonal indices waterpoints = tools.openwaterpoints(landmask) nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1)", "for calculating the PCA matrices of the # SalishSeaCast surface nitrate and temperature", "\"\"\" # Calculate orthogonal PCA matrices A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A", "PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])), 'U':", "semi-enclosed coastal sea, # Ocean Sci., 2022. # # $ cd scripts #", "axis=0)), 'A': (['time', 'mode'], PCA['A']), 'B': (['time', 'mode'], PCA['B']), 'E': (['y', 'x', 'mode'],", "in: # # <NAME> and <NAME>: Wind-driven upwelling and # surface nutrient delivery", "A. 
H is determined iteratively from the Lagrange multiplier optimization of the varimax", "desc='Calculating rotation matrix'): d_old = d B = A.dot(H) # -------------------------------------------------------- 18.4.5 beta", "assumes a 2D input shape for valuesflat of [space, mode]. \"\"\" shape =", "18: Analytical Rotations - Section 18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp.", "(max(yflat)+1, max(xflat)+1, valuesflat.shape[1]) valuesgridded = np.zeros(shape) for y, x, row in zip(yflat, xflat,", "to netCDF variables = { 'landmask': (['y', 'x'], landmask), 'median': (['y', 'x'], np.median(data[var][iseason,", "A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False) A = A_prime.dot(np.diag(sqrtL)) A2 = A *", "def calc_PCA(z): \"\"\"Calculate EOF matrices of n x p data matrix z using", "the analyses presented in: # # <NAME> and <NAME>: Wind-driven upwelling and #" ]
[ "<reponame>shafikshaon/daybook<gh_stars>0 __author__ = '<NAME>' from django.db import models class GistManager(models.Manager): def get_queryset(self): return", "= '<NAME>' from django.db import models class GistManager(models.Manager): def get_queryset(self): return super(GistManager, self).get_queryset().filter(is_delete=False)", "__author__ = '<NAME>' from django.db import models class GistManager(models.Manager): def get_queryset(self): return super(GistManager," ]
[ "making a copy. \"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor:", "not None self.blocks = blocks init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor: for", "list ), \"input for MultiPathWayWithFuse needs to be a list of tensors\" if", "accociated bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and", "in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if", "List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list ), \"input for MultiPathWayWithFuse needs to", "model that handles bounding boxes as part of input. \"\"\" def __init__(self, model:", "super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list", "inplace (bool): If inplace, directly update the input list without making a copy.", "not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not None: x_out", "is not None self.blocks = blocks init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor:", "x_out = x else: x_out = [None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)):", "copy. \"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance(", "N ↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓ \"\"\" def __init__( self,", "torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding", "blocks (torch.nn.module_list): the list of block modules. \"\"\" super().__init__() assert blocks is not", "of tensors\" if self.inplace: x_out = x else: x_out = [None] * len(x)", "if using RoIAlignRotated. 
\"\"\" features = self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0],", "list of models from all pathways. multipathway_fusion (nn.module): fusion model. inplace (bool): If", "list of blocks for video recognition. :: Input ↓ Block 1 ↓ .", "len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx](", "super().__init__() self.model = model self.detection_head = detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor):", "of the pathway contains its own Blocks and Fusion layers across different pathways.", "+ stages. detection_head (nn.Module): a network head. that can take in input bounding", "= self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build", "bounding boxes and the outputs from the model. \"\"\" super().__init__() self.model = model", "head. that can take in input bounding boxes and the outputs from the", "(nn.module_list): list of models from all pathways. multipathway_fusion (nn.module): fusion model. inplace (bool):", "bboxes (torch.tensor): accociated bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using", "import torch.nn as nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights class", "in range(len(self.blocks)): x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose", "\"\"\" A general purpose model that handles bounding boxes as part of input.", "input bounding boxes and the outputs from the model. \"\"\" super().__init__() self.model =", "different pathways. :: Pathway 1 ... Pathway N ↓ ↓ Block 1 Block", "self.model = model self.detection_head = detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\"", "using RoIAlignRotated. 
\"\"\" features = self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1)", "own Blocks and Fusion layers across different pathways. :: Pathway 1 ... Pathway", "-1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion for video recognition, each", "tensors\" if self.inplace: x_out = x else: x_out = [None] * len(x) for", "from all pathways. multipathway_fusion (nn.module): fusion model. inplace (bool): If inplace, directly update", "import torch import torch.nn as nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import", "boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index,", "in `create_resnet`. \"\"\" def __init__(self, *, blocks: nn.ModuleList) -> None: \"\"\" Args: blocks", "= True, ) -> None: \"\"\" Args: multipathway_blocks (nn.module_list): list of models from", "layers across different pathways. :: Pathway 1 ... Pathway N ↓ ↓ Block", "all pathways. multipathway_fusion (nn.module): fusion model. inplace (bool): If inplace, directly update the", "assert blocks is not None self.blocks = blocks init_net_weights(self) def forward(self, x: torch.Tensor)", "self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not", "Optional import torch import torch.nn as nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init", "for video recognition, each of the pathway contains its own Blocks and Fusion", "Pathway N ↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓ \"\"\" def __init__(", "(torch.nn.module_list): the list of block modules. \"\"\" super().__init__() assert blocks is not None", "fusion model. 
inplace (bool): If inplace, directly update the input list without making", "return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that handles bounding boxes", "return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion for video", "stem + stages. detection_head (nn.Module): a network head. that can take in input", "multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, ) -> None: \"\"\" Args: multipathway_blocks (nn.module_list):", "x_out = [None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not", "x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that handles bounding boxes as", "be found in `create_resnet`. \"\"\" def __init__(self, *, blocks: nn.ModuleList) -> None: \"\"\"", "self.blocks = blocks init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor: for idx in", "blocks for video recognition. :: Input ↓ Block 1 ↓ . . .", "Args: model (nn.Module): a model that preceeds the head. Ex: stem + stages.", "super().__init__() assert blocks is not None self.blocks = blocks init_net_weights(self) def forward(self, x:", "List, Optional import torch import torch.nn as nn from pytorchvideo.layers.utils import set_attributes from", "and its affiliates. All Rights Reserved. from typing import List, Optional import torch", "(nn.module): fusion model. inplace (bool): If inplace, directly update the input list without", "blocks init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)): x", "directly update the input list without making a copy. \"\"\" super().__init__() set_attributes(self, locals())", "None: \"\"\" Args: multipathway_blocks (nn.module_list): list of models from all pathways. 
multipathway_fusion (nn.module):", "def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor bboxes", "Input ↓ Block 1 ↓ . . . ↓ Block N ↓ The", "found in `create_resnet`. \"\"\" def __init__(self, *, blocks: nn.ModuleList) -> None: \"\"\" Args:", "Net models with a list of blocks for video recognition. :: Input ↓", "*, blocks: nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list): the list of block", "nn.Module): \"\"\" Args: model (nn.Module): a model that preceeds the head. Ex: stem", "forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list ), \"input for MultiPathWayWithFuse", "↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓ \"\"\" def __init__( self, *,", "blocks: nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list): the list of block modules.", "model: nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module): a model that preceeds the", "blocks is not None self.blocks = blocks init_net_weights(self) def forward(self, x: torch.Tensor) ->", "N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated. \"\"\" features =", "def __init__(self, *, blocks: nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list): the list", "\"\"\" super().__init__() assert blocks is not None self.blocks = blocks init_net_weights(self) def forward(self,", "N ↓ The ResNet builder can be found in `create_resnet`. \"\"\" def __init__(self,", "(Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees)", "self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion", "can be found in `create_resnet`. 
\"\"\" def __init__(self, *, blocks: nn.ModuleList) -> None:", "be a list of tensors\" if self.inplace: x_out = x else: x_out =", "out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block", "detection_head: nn.Module): \"\"\" Args: model (nn.Module): a model that preceeds the head. Ex:", "Net(nn.Module): \"\"\" Build a general Net models with a list of blocks for", "and the outputs from the model. \"\"\" super().__init__() self.model = model self.detection_head =", "= blocks init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)):", "is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr, y_ctr, width,", "\"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module): a model", "that handles bounding boxes as part of input. \"\"\" def __init__(self, model: nn.Module,", "angle_degrees) if using RoIAlignRotated. \"\"\" features = self.model(x) out = self.detection_head(features, bboxes) return", "None: \"\"\" Args: blocks (torch.nn.module_list): the list of block modules. \"\"\" super().__init__() assert", "input list without making a copy. \"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x:", "Fusion layers across different pathways. :: Pathway 1 ... Pathway N ↓ ↓", "from typing import List, Optional import torch import torch.nn as nn from pytorchvideo.layers.utils", "(nn.Module): a model that preceeds the head. Ex: stem + stages. detection_head (nn.Module):", "multipathway_blocks (nn.module_list): list of models from all pathways. multipathway_fusion (nn.module): fusion model. 
inplace", "for idx in range(len(self.blocks)): x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A", "torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)): x = self.blocks[idx](x) return x class", "x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes. The format is N*5", "model (nn.Module): a model that preceeds the head. Ex: stem + stages. detection_head", "a copy. \"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert", "detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor", "= self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that handles", "handles bounding boxes as part of input. \"\"\" def __init__(self, model: nn.Module, detection_head:", "using RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated.", "Block N ↓ The ResNet builder can be found in `create_resnet`. \"\"\" def", "Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes. The format is", "The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr,", "pathways. :: Pathway 1 ... Pathway N ↓ ↓ Block 1 Block N", "__init__(self, *, blocks: nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list): the list of", "range(len(self.blocks)): x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model", "to be a list of tensors\" if self.inplace: x_out = x else: x_out", "Build a general Net models with a list of blocks for video recognition.", "-> None: \"\"\" Args: blocks (torch.nn.module_list): the list of block modules. 
\"\"\" super().__init__()", "-> torch.Tensor: assert isinstance( x, list ), \"input for MultiPathWayWithFuse needs to be", "__init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, ) ->", "class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that handles bounding boxes as part", "boxes as part of input. \"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\"", "take in input bounding boxes and the outputs from the model. \"\"\" super().__init__()", "(c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing import List,", "All Rights Reserved. from typing import List, Optional import torch import torch.nn as", "fusion for video recognition, each of the pathway contains its own Blocks and", "the input list without making a copy. \"\"\" super().__init__() set_attributes(self, locals()) def forward(self,", "a model that preceeds the head. Ex: stem + stages. detection_head (nn.Module): a", "bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion for", "-> None: \"\"\" Args: multipathway_blocks (nn.module_list): list of models from all pathways. multipathway_fusion", "Block 1 ↓ . . . ↓ Block N ↓ The ResNet builder", "of input. \"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module):", "features = self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\"", "pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build a general Net models with a", "list of block modules. 
\"\"\" super().__init__() assert blocks is not None self.blocks =", "None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not None: x_out =", "class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion for video recognition, each of", "\"\"\" def __init__(self, *, blocks: nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list): the", "isinstance( x, list ), \"input for MultiPathWayWithFuse needs to be a list of", "Reserved. from typing import List, Optional import torch import torch.nn as nn from", "pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build a general", "list of tensors\" if self.inplace: x_out = x else: x_out = [None] *", "1 ↓ . . . ↓ Block N ↓ The ResNet builder can", "boxes and the outputs from the model. \"\"\" super().__init__() self.model = model self.detection_head", "of block modules. \"\"\" super().__init__() assert blocks is not None self.blocks = blocks", "that preceeds the head. Ex: stem + stages. detection_head (nn.Module): a network head.", "else: x_out = [None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is", "import init_net_weights class Net(nn.Module): \"\"\" Build a general Net models with a list", ". . . ↓ Block N ↓ The ResNet builder can be found", "(torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes. The format is N*5 (Index,", "height, angle_degrees) if using RoIAlignRotated. \"\"\" features = self.model(x) out = self.detection_head(features, bboxes)", "list without making a copy. 
\"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor])", "self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway", "forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor bboxes (torch.tensor):", "If inplace, directly update the input list without making a copy. \"\"\" super().__init__()", "model. inplace (bool): If inplace, directly update the input list without making a", "a list of tensors\" if self.inplace: x_out = x else: x_out = [None]", "x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated", "without making a copy. \"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) ->", "MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion for video recognition, each of the", "Block 1 Block N ↓⭠ --Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList,", ":: Input ↓ Block 1 ↓ . . . ↓ Block N ↓", "bounding boxes as part of input. \"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module):", "init_net_weights class Net(nn.Module): \"\"\" Build a general Net models with a list of", "torch.Tensor: for idx in range(len(self.blocks)): x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\"", "recognition. :: Input ↓ Block 1 ↓ . . . ↓ Block N", "the head. Ex: stem + stages. detection_head (nn.Module): a network head. that can", "Optional[bool] = True, ) -> None: \"\"\" Args: multipathway_blocks (nn.module_list): list of models", "def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module): a model that", "a general Net models with a list of blocks for video recognition. 
::", "__init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module): a model that preceeds", "Pathway 1 ... Pathway N ↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓", "the model. \"\"\" super().__init__() self.model = model self.detection_head = detection_head def forward(self, x:", "nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list): the list of block modules. \"\"\"", "models with a list of blocks for video recognition. :: Input ↓ Block", "... Pathway N ↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓ \"\"\" def", "range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion", "model self.detection_head = detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x", "video recognition. :: Input ↓ Block 1 ↓ . . . ↓ Block", "recognition, each of the pathway contains its own Blocks and Fusion layers across", "tensor bboxes (torch.tensor): accociated bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if", "Args: multipathway_blocks (nn.module_list): list of models from all pathways. multipathway_fusion (nn.module): fusion model.", "A general purpose model that handles bounding boxes as part of input. \"\"\"", "\"\"\" Build a general Net models with a list of blocks for video", "import set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build a general Net", "↓ . . . ↓ Block N ↓ The ResNet builder can be", "\"\"\" Args: blocks (torch.nn.module_list): the list of block modules. \"\"\" super().__init__() assert blocks", "MultiPathWayWithFuse needs to be a list of tensors\" if self.inplace: x_out = x", "= self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with", "(bool): If inplace, directly update the input list without making a copy. 
\"\"\"", "\"\"\" Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes. The format", "import List, Optional import torch import torch.nn as nn from pytorchvideo.layers.utils import set_attributes", "nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module): a model that preceeds the head.", "x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list ), \"input for MultiPathWayWithFuse needs", "torch import torch.nn as nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights", "detection_head (nn.Module): a network head. that can take in input bounding boxes and", "bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6", "its affiliates. All Rights Reserved. from typing import List, Optional import torch import", "and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated. \"\"\" features", "multipathway_fusion (nn.module): fusion model. inplace (bool): If inplace, directly update the input list", "init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)): x =", "block modules. \"\"\" super().__init__() assert blocks is not None self.blocks = blocks init_net_weights(self)", "↓ The ResNet builder can be found in `create_resnet`. \"\"\" def __init__(self, *,", "idx in range(len(self.blocks)): x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general", ":: Pathway 1 ... Pathway N ↓ ↓ Block 1 Block N ↓⭠", "set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list ),", "as part of input. \"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args:", "Ex: stem + stages. detection_head (nn.Module): a network head. that can take in", "for video recognition. :: Input ↓ Block 1 ↓ . . . 
↓", "Block N ↓⭠ --Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module],", "typing import List, Optional import torch import torch.nn as nn from pytorchvideo.layers.utils import", "self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that handles bounding", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing", "\"\"\" super().__init__() self.model = model self.detection_head = detection_head def forward(self, x: torch.Tensor, bboxes:", "↓ Block 1 Block N ↓⭠ --Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks:", "can take in input bounding boxes and the outputs from the model. \"\"\"", "that can take in input bounding boxes and the outputs from the model.", "The ResNet builder can be found in `create_resnet`. \"\"\" def __init__(self, *, blocks:", "\"\"\" features = self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module):", "nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build", "= [None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None:", "pathway contains its own Blocks and Fusion layers across different pathways. :: Pathway", "--Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] =", "block with fusion for video recognition, each of the pathway contains its own", "Optional[nn.Module], inplace: Optional[bool] = True, ) -> None: \"\"\" Args: multipathway_blocks (nn.module_list): list", "pathways. multipathway_fusion (nn.module): fusion model. 
inplace (bool): If inplace, directly update the input", "multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, ) -> None: \"\"\" Args:", "from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build a", "\"\"\" super().__init__() set_attributes(self, locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x,", "`create_resnet`. \"\"\" def __init__(self, *, blocks: nn.ModuleList) -> None: \"\"\" Args: blocks (torch.nn.module_list):", "if self.inplace: x_out = x else: x_out = [None] * len(x) for pathway_idx", "update the input list without making a copy. \"\"\" super().__init__() set_attributes(self, locals()) def", "RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated. \"\"\"", "x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not None: x_out = self.multipathway_fusion(x_out)", "outputs from the model. \"\"\" super().__init__() self.model = model self.detection_head = detection_head def", "= detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input", "from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build a general Net models with", "torch.nn as nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module):", "*, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, ) -> None: \"\"\"", "bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes.", "locals()) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list ), \"input", "of models from all pathways. 
multipathway_fusion (nn.module): fusion model. inplace (bool): If inplace,", "nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, ) -> None: \"\"\" Args: multipathway_blocks", "each of the pathway contains its own Blocks and Fusion layers across different", "Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing import", "\"\"\" Args: multipathway_blocks (nn.module_list): list of models from all pathways. multipathway_fusion (nn.module): fusion", "is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not None:", "↓ Block 1 ↓ . . . ↓ Block N ↓ The ResNet", "from the model. \"\"\" super().__init__() self.model = model self.detection_head = detection_head def forward(self,", "x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated. \"\"\" features = self.model(x) out", "1 ... Pathway N ↓ ↓ Block 1 Block N ↓⭠ --Fusion----↓ \"\"\"", "Args: blocks (torch.nn.module_list): the list of block modules. \"\"\" super().__init__() assert blocks is", "head. Ex: stem + stages. detection_head (nn.Module): a network head. that can take", "video recognition, each of the pathway contains its own Blocks and Fusion layers", "-> torch.Tensor: for idx in range(len(self.blocks)): x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module):", "x: torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)): x = self.blocks[idx](x) return x", "\"input for MultiPathWayWithFuse needs to be a list of tensors\" if self.inplace: x_out", "general Net models with a list of blocks for video recognition. :: Input", "contains its own Blocks and Fusion layers across different pathways. :: Pathway 1", "x = self.blocks[idx](x) return x class DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that", "torch.Tensor): \"\"\" Args: x (torch.tensor): input tensor bboxes (torch.tensor): accociated bounding boxes. 
The", "its own Blocks and Fusion layers across different pathways. :: Pathway 1 ...", "of blocks for video recognition. :: Input ↓ Block 1 ↓ . .", "if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is", "X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if", "DetectionBBoxNetwork(nn.Module): \"\"\" A general purpose model that handles bounding boxes as part of", "ResNet builder can be found in `create_resnet`. \"\"\" def __init__(self, *, blocks: nn.ModuleList)", "inplace, directly update the input list without making a copy. \"\"\" super().__init__() set_attributes(self,", "network head. that can take in input bounding boxes and the outputs from", "Inc. and its affiliates. All Rights Reserved. from typing import List, Optional import", "with fusion for video recognition, each of the pathway contains its own Blocks", "affiliates. All Rights Reserved. from typing import List, Optional import torch import torch.nn", "the list of block modules. \"\"\" super().__init__() assert blocks is not None self.blocks", "= x else: x_out = [None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)): if", "(torch.tensor): accociated bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign", "RoIAlignRotated. \"\"\" features = self.model(x) out = self.detection_head(features, bboxes) return out.view(out.shape[0], -1) class", "builder can be found in `create_resnet`. \"\"\" def __init__(self, *, blocks: nn.ModuleList) ->", "model. \"\"\" super().__init__() self.model = model self.detection_head = detection_head def forward(self, x: torch.Tensor,", "y_ctr, width, height, angle_degrees) if using RoIAlignRotated. 
\"\"\" features = self.model(x) out =", "* len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] =", "N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height,", "for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx]", "None self.blocks = blocks init_net_weights(self) def forward(self, x: torch.Tensor) -> torch.Tensor: for idx", "preceeds the head. Ex: stem + stages. detection_head (nn.Module): a network head. that", "\"\"\" Build multi-pathway block with fusion for video recognition, each of the pathway", "self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, ) -> None:", "forward(self, x: torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)): x = self.blocks[idx](x) return", "self.inplace: x_out = x else: x_out = [None] * len(x) for pathway_idx in", "part of input. \"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args: model", "in input bounding boxes and the outputs from the model. \"\"\" super().__init__() self.model", "modules. \"\"\" super().__init__() assert blocks is not None self.blocks = blocks init_net_weights(self) def", "def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True, )", "N ↓⭠ --Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace:", "↓ Block N ↓ The ResNet builder can be found in `create_resnet`. \"\"\"", "Facebook, Inc. and its affiliates. All Rights Reserved. from typing import List, Optional", "across different pathways. :: Pathway 1 ... Pathway N ↓ ↓ Block 1", "a list of blocks for video recognition. 
:: Input ↓ Block 1 ↓", "[None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx]", "as nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\"", "pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx] is not None: x_out[pathway_idx] = self.multipathway_blocks[pathway_idx]( x[pathway_idx] )", "input tensor bboxes (torch.tensor): accociated bounding boxes. The format is N*5 (Index, X_1,Y_1,X_2,Y_2)", "Build multi-pathway block with fusion for video recognition, each of the pathway contains", "a network head. that can take in input bounding boxes and the outputs", "inplace: Optional[bool] = True, ) -> None: \"\"\" Args: multipathway_blocks (nn.module_list): list of", "out.view(out.shape[0], -1) class MultiPathWayWithFuse(nn.Module): \"\"\" Build multi-pathway block with fusion for video recognition,", "format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign and N*6 (Index, x_ctr, y_ctr,", "with a list of blocks for video recognition. :: Input ↓ Block 1", "\"\"\" Args: model (nn.Module): a model that preceeds the head. Ex: stem +", "assert isinstance( x, list ), \"input for MultiPathWayWithFuse needs to be a list", "class Net(nn.Module): \"\"\" Build a general Net models with a list of blocks", "), \"input for MultiPathWayWithFuse needs to be a list of tensors\" if self.inplace:", "x else: x_out = [None] * len(x) for pathway_idx in range(len(self.multipathway_blocks)): if self.multipathway_blocks[pathway_idx]", "(nn.Module): a network head. that can take in input bounding boxes and the", ". . ↓ Block N ↓ The ResNet builder can be found in", "set_attributes from pytorchvideo.models.weight_init import init_net_weights class Net(nn.Module): \"\"\" Build a general Net models", "the outputs from the model. 
\"\"\" super().__init__() self.model = model self.detection_head = detection_head", "self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not None: x_out = self.multipathway_fusion(x_out) return x_out", "Rights Reserved. from typing import List, Optional import torch import torch.nn as nn", "width, height, angle_degrees) if using RoIAlignRotated. \"\"\" features = self.model(x) out = self.detection_head(features,", "torch.Tensor: assert isinstance( x, list ), \"input for MultiPathWayWithFuse needs to be a", "def forward(self, x: torch.Tensor) -> torch.Tensor: for idx in range(len(self.blocks)): x = self.blocks[idx](x)", "for MultiPathWayWithFuse needs to be a list of tensors\" if self.inplace: x_out =", "needs to be a list of tensors\" if self.inplace: x_out = x else:", "if using RoIAlign and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if using", "x, list ), \"input for MultiPathWayWithFuse needs to be a list of tensors\"", "def forward(self, x: List[torch.Tensor]) -> torch.Tensor: assert isinstance( x, list ), \"input for", ") -> None: \"\"\" Args: multipathway_blocks (nn.module_list): list of models from all pathways.", "= self.multipathway_blocks[pathway_idx]( x[pathway_idx] ) if self.multipathway_fusion is not None: x_out = self.multipathway_fusion(x_out) return", "self.detection_head = detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args: x (torch.tensor):", "↓⭠ --Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool]", "models from all pathways. multipathway_fusion (nn.module): fusion model. inplace (bool): If inplace, directly", "multi-pathway block with fusion for video recognition, each of the pathway contains its", "1 Block N ↓⭠ --Fusion----↓ \"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion:", ". 
↓ Block N ↓ The ResNet builder can be found in `create_resnet`.", "purpose model that handles bounding boxes as part of input. \"\"\" def __init__(self,", "(Index, x_ctr, y_ctr, width, height, angle_degrees) if using RoIAlignRotated. \"\"\" features = self.model(x)", "stages. detection_head (nn.Module): a network head. that can take in input bounding boxes", "general purpose model that handles bounding boxes as part of input. \"\"\" def", "True, ) -> None: \"\"\" Args: multipathway_blocks (nn.module_list): list of models from all", "model that preceeds the head. Ex: stem + stages. detection_head (nn.Module): a network", "the pathway contains its own Blocks and Fusion layers across different pathways. ::", "and Fusion layers across different pathways. :: Pathway 1 ... Pathway N ↓", "= model self.detection_head = detection_head def forward(self, x: torch.Tensor, bboxes: torch.Tensor): \"\"\" Args:", "\"\"\" def __init__( self, *, multipathway_blocks: nn.ModuleList, multipathway_fusion: Optional[nn.Module], inplace: Optional[bool] = True,", "Blocks and Fusion layers across different pathways. :: Pathway 1 ... Pathway N", "input. \"\"\" def __init__(self, model: nn.Module, detection_head: nn.Module): \"\"\" Args: model (nn.Module): a" ]
[ "return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and (0 in", "phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]):", "total_phases): mx=max(phase_list) if len(phase_list)<total_phases: return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1", "phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]): return [0]", "f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]): return", "l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]): return [0] else:", "mx=max(phase_list) if len(phase_list)<total_phases: return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse()", "countReps(phase_list, total_phases): mx=max(phase_list) if len(phase_list)<total_phases: return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse()", "len(phase_list)<total_phases: return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0", "if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and", "phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) 
phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ])", "if len(phase_list)<total_phases: return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if", "mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in phase_list[:f_occ]) and (0", "if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]): return [0] else: return phase_list", "def countReps(phase_list, total_phases): mx=max(phase_list) if len(phase_list)<total_phases: return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx)", "phase_list.reverse() if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]): return [0] else: return", "return phase_list if mx<total_phases//2: return phase_list f_occ=phase_list.index(mx) phase_list.reverse() l_occ=len(phase_list)-phase_list.index(mx)-1 phase_list.reverse() if (0 in" ]
[ "A + 9 for i in range(5): A = A + 9 A", "range(2): A = A + 3 for i in range(1): A = A", "A + 8 for i in range(1): A = A + 2 for", "A + 5 A = A + 3 A = A + 6", "8 A = A + 7 A = A + 2 for i", "for i in range(8): for i in range(6): A = A + 4", "A + 5 A = A + 5 A = A + 1", "= A + 2 A = A + 5 for i in range(9):", "range(3): A = A + 3 A = A + 9 A =", "8 A = A + 9 A = A + 3 A =", "6 for i in range(7): for i in range(6): for i in range(9):", "A = A + 4 for i in range(9): for i in range(3):", "3 A = A + 8 for i in range(7): A = A", "+ 4 A = A + 1 A = A + 7 A", "4 A = A + 9 A = A + 1 A =", "i in range(8): A = A + 9 A = A + 8", "+ 5 A = A + 2 A = A + 1 for", "A + 3 A = A + 6 A = A + 4", "for i in range(8): A = A + 5 for i in range(3):", "for i in range(8): A = A + 9 A = A +", "i in range(4): A = A + 2 A = A + 1", "A + 6 for i in range(1): A = A + 7 A", "+ 2 for i in range(4): A = A + 6 for i", "range(9): A = A + 6 A = A + 8 A =", "A = A + 4 for i in range(8): A = A +", "range(1): A = A + 3 for i in range(1): A = A", "A = A + 5 for i in range(7): A = A +", "4 for i in range(8): A = A + 2 A = A", "A + 9 for i in range(2): for i in range(6): A =", "5 for i in range(3): A = A + 5 A = A", "= A + 6 for i in range(1): A = A + 3", "8 A = A + 2 A = A + 3 for i", "in range(2): A = A + 7 for i in range(8): A =", "+ 3 for i in range(1): for i in range(1): A = A", "range(9): for i in range(7): A = A + 3 for i in", "= A + 3 A = A + 9 for i in range(7):", "range(1): A = A + 8 A = A + 4 for i", "5 A = A + 9 for i in range(2): for i in", "A = A + 1 for i in range(4): for i in range(1):", "5 for i in range(3): A = A + 3 A = A", "i in range(5): A = A + 9 A = A + 4", "in range(1): A = A + 4 A = A + 2 for", "range(5): A = A + 7 for i in range(5): A = A", "A + 4 for i in range(1): A = A + 5 for", "8 A = A + 1 A = A + 2 for i", "A + 9 A = A + 5 A = A + 5", "= A + 7 A = A + 4 for i in range(4):", "= A + 4 for i in range(8): A = A + 5", "A + 4 for i in range(3): A = A + 5 A", "A + 8 A = A + 2 for i 
in range(9): A", "in range(2): A = A + 2 for i in range(3): A =", "7 for i in range(5): A = A + 7 A = A", "A + 1 for i in range(6): A = A + 2 A", "in range(4): for i in range(9): A = A + 2 for i", "8 A = A + 3 for i in range(2): A = A", "7 for i in range(4): A = A + 2 A = A", "= A + 4 for i in range(1): for i in range(7): for", "i in range(6): A = A + 6 A = A + 1", "= A + 6 for i in range(3): for i in range(4): A", "= A + 6 A = A + 4 for i in range(3):", "A + 9 for i in range(1): A = A + 6 A", "for i in range(8): A = A + 7 for i in range(6):", "2 A = A + 4 A = A + 9 for i", "A + 4 A = A + 6 for i in range(1): A", "A + 8 for i in range(3): A = A + 7 A", "9 A = A + 3 for i in range(4): A = A", "2 A = A + 4 A = A + 7 for i", "in range(7): A = A + 2 for i in range(5): A =", "i in range(3): A = A + 9 for i in range(1): A", "+ 4 A = A + 5 for i in range(9): for i", "in range(5): for i in range(6): A = A + 5 A =", "= A + 7 A = A + 8 for i in range(4):", "i in range(1): for i in range(2): A = A + 2 for", "i in range(2): A = A + 6 for i in range(4): A", "range(7): for i in range(2): A = A + 3 A = A", "for i in range(9): A = A + 5 A = A +", "A + 3 for i in range(1): for i in range(1): A =", "in range(8): for i in range(1): A = A + 7 A =", "5 for i in range(8): for i in range(2): A = A +", "A + 4 A = A + 7 A = A + 6", "A + 7 A = A + 7 A = A + 3", "A = A + 5 A = A + 2 for i in", "A + 1 for i in range(9): A = A + 2 A", "in range(1): A = A + 8 A = A + 4 for", "8 for i in range(3): A = A + 7 A = A", "1 A = A + 5 A = A + 8 for i", "+ 9 A = A + 4 for i in range(9): A =", "5 for i in range(1): for i in range(8): A = A +", "range(5): for i in range(3): for i in range(6): A = A +", "3 A = A + 3 A = A + 3 A =", "= A + 3 for i in range(6): for i in range(1): A", "range(2): A = A + 3 A = A + 7 for i", "= A + 5 A = A + 9 for i in range(5):", "A = A + 5 A = A + 1 A = A", "+ 8 for i in range(1): A = A + 6 A =", "in range(6): A = A + 9 A = A + 3 for", "range(1): A = A + 3 A = A + 3 A =", "+ 
4 A = A + 7 for i in range(1): for i", "i in range(5): A = A + 5 for i in range(1): A", "A + 5 for i in range(3): A = A + 7 for", "2 for i in range(3): A = A + 3 for i in", "+ 9 for i in range(1): A = A + 6 A =", "A + 2 A = A + 2 A = A + 8", "6 for i in range(5): for i in range(6): A = A +", "A + 4 for i in range(5): A = A + 8 A", "+ 3 for i in range(5): A = A + 5 A =", "A + 1 for i in range(5): A = A + 1 for", "= A + 9 A = A + 8 for i in range(1):", "+ 7 for i in range(5): A = A + 1 for i", "for i in range(7): for i in range(8): for i in range(6): A", "A + 2 for i in range(2): A = A + 3 for", "A = A + 7 for i in range(6): A = A +", "9 A = A + 8 for i in range(1): A = A", "range(1): A = A + 4 for i in range(2): A = A", "= A + 8 A = A + 6 A = A +", "+ 5 for i in range(9): A = A + 5 A =", "i in range(1): A = A + 4 for i in range(3): A", "+ 5 for i in range(4): A = A + 5 A =", "= A + 6 for i in range(7): A = A + 9", "+ 4 A = A + 2 A = A + 3 A", "for i in range(4): A = A + 2 A = A +", "A = A + 5 for i in range(9): for i in range(3):", "in range(2): A = A + 5 A = A + 6 for", "= A + 9 for i in range(5): A = A + 5", "range(9): A = A + 2 A = A + 9 A =", "range(6): A = A + 3 for i in range(9): A = A", "+ 4 A = A + 9 A = A + 1 A", "A + 5 for i in range(3): A = A + 5 for", "A = A + 5 for i in range(2): for i in range(3):", "= A + 1 for i in range(6): A = A + 8", "A = A + 1 A = A + 8 for i in", "+ 4 for i in range(7): A = A + 8 A =", "for i in range(7): A = A + 7 for i in range(7):", "A + 5 A = A + 2 A = A + 4", "i in range(6): for i in range(2): A = A + 1 A", "A + 8 A = A + 4 for i in range(3): for", "A + 1 for i in range(1): A = A + 4 A", "in range(2): A = A + 5 for i in range(3): A =", "in range(1): A = A + 3 A = A + 9 A", "i in range(2): A = A + 5 A = A + 1", "for i in range(4): for i in range(9): A = A + 2", "A + 7 for i in range(5): A = A + 3 for", "A = A + 6 A = A + 4 A = A", "1 A = A + 2 A = A + 8 A =", "+ 6 for i in range(4): for i in range(2): A = A", "i in range(3): 
for i in range(5): A = A + 4 for", "2 for i in range(7): for i in range(8): for i in range(6):", "i in range(6): A = A + 2 for i in range(5): A", "i in range(2): A = A + 2 A = A + 5", "A + 5 A = A + 2 A = A + 8", "in range(1): A = A + 9 A = A + 3 for", "= A + 3 for i in range(8): A = A + 2", "3 A = A + 6 A = A + 4 A =", "A + 9 A = A + 6 A = A + 3", "= A + 8 A = A + 4 for i in range(8):", "= A + 9 A = A + 6 for i in range(4):", "in range(9): A = A + 8 A = A + 9 A", "i in range(2): A = A + 1 A = A + 5", "for i in range(8): A = A + 2 A = A +", "A + 5 A = A + 1 A = A + 6", "range(8): A = A + 5 A = A + 1 A =", "A + 5 A = A + 7 for i in range(6): A", "in range(6): for i in range(9): A = A + 7 A =", "in range(2): A = A + 3 A = A + 2 A", "1 A = A + 4 for i in range(1): for i in", "A + 3 A = A + 9 A = A + 5", "6 A = A + 8 for i in range(6): A = A", "A + 5 A = A + 4 for i in range(4): for", "for i in range(4): A = A + 8 for i in range(7):", "+ 3 A = A + 3 A = A + 9 for", "for i in range(8): for i in range(6): A = A + 1", "1 A = A + 9 A = A + 8 for i", "A = A + 1 for i in range(8): for i in range(1):", "for i in range(9): for i in range(7): A = A + 3", "+ 1 A = A + 6 for i in range(2): A =", "A = A + 7 for i in range(2): for i in range(7):", "= A + 1 A = A + 5 A = A +", "A + 8 A = A + 4 for i in range(1): A", "range(2): for i in range(6): A = A + 9 A = A", "= A + 2 for i in range(2): A = A + 3", "range(6): for i in range(1): A = A + 5 A = A", "3 for i in range(6): for i in range(2): A = A +", "7 A = A + 2 for i in range(2): A = A", "A = A + 7 for i in range(4): for i in range(7):", "in range(6): A = A + 5 A = A + 7 for", "A + 7 for i in range(6): A = A + 7 for", "+ 7 A = A + 3 for i in range(2): A =", "A + 1 A = A + 5 A = A + 8", "= A + 3 for i in range(3): A = A + 4", "= A + 1 A = A + 4 for i in range(8):", "i in range(7): A = A + 3 A = A + 5", "= A + 5 A = A + 1 for i in range(8):", "A + 7 A = A + 7 A = A + 6", "3 A = A + 2 A = A + 7 A =", "= A + 1 for i in range(5): A = A 
+ 1", "= A + 2 for i in range(1): A = A + 6", "= A + 3 for i in range(1): for i in range(1): A", "+ 6 for i in range(3): A = A + 3 A =", "range(7): for i in range(5): A = A + 7 for i in", "range(1): A = A + 8 for i in range(8): for i in", "= A + 5 A = A + 6 for i in range(1):", "= A + 8 A = A + 7 A = A +", "range(6): A = A + 7 for i in range(4): A = A", "= A + 4 for i in range(3): A = A + 1", "for i in range(3): for i in range(3): A = A + 5", "in range(5): A = A + 8 A = A + 7 A", "4 A = A + 7 for i in range(1): for i in", "+ 6 A = A + 9 A = A + 1 for", "A = A + 8 A = A + 8 for i in", "= A + 5 A = A + 4 for i in range(9):", "= A + 1 A = A + 5 for i in range(3):", "for i in range(3): for i in range(4): A = A + 9", "A + 1 A = A + 9 A = A + 8", "range(8): A = A + 7 A = A + 8 A =", "+ 5 for i in range(7): A = A + 8 for i", "= A + 8 for i in range(8): for i in range(4): for", "range(7): A = A + 5 A = A + 5 for i", "5 A = A + 8 for i in range(2): A = A", "A = A + 1 for i in range(2): A = A +", "5 for i in range(3): A = A + 6 for i in", "in range(6): A = A + 1 A = A + 6 for", "for i in range(4): for i in range(7): A = A + 1", "+ 9 A = A + 2 for i in range(2): A =", "8 A = A + 5 A = A + 9 for i", "i in range(7): for i in range(7): for i in range(5): A =", "A = A + 3 for i in range(5): for i in range(1):", "= A + 7 for i in range(4): A = A + 2", "+ 3 for i in range(3): A = A + 4 A =", "7 A = A + 7 A = A + 8 A =", "range(9): for i in range(6): A = A + 5 A = A", "1 for i in range(9): A = A + 9 A = A", "= A + 3 for i in range(5): A = A + 5", "5 for i in range(4): A = A + 2 A = A", "9 for i in range(3): for i in range(3): A = A +", "i in range(5): A = A + 2 A = A + 5", "+ 4 for i in range(3): A = A + 1 A =", "A + 5 for i in range(2): A = A + 6 for", "+ 2 for i in range(5): A = A + 9 A =", "= A + 1 for i in range(8): for i in range(1): for", "in range(6): A = A + 2 A = A + 8 A", "in range(5): A = A + 9 A = A + 3 for", "+ 7 for i in range(5): A = A + 7 A =", "9 A = A + 8 for i in 
range(8): for i in", "+ 5 A = A + 1 for i in range(6): A =", "for i in range(8): A = A + 2 for i in range(8):", "A + 4 for i in range(9): A = A + 2 A", "in range(5): A = A + 6 A = A + 5 for", "A + 6 A = A + 7 A = A + 6", "for i in range(6): A = A + 3 for i in range(2):", "in range(3): A = A + 6 for i in range(9): A =", "1 A = A + 2 A = A + 4 A =", "i in range(7): A = A + 3 A = A + 3", "+ 5 A = A + 2 A = A + 4 A", "= A + 9 A = A + 5 A = A +", "i in range(5): A = A + 3 A = A + 9", "4 for i in range(8): A = A + 5 A = A", "1 for i in range(6): A = A + 1 for i in", "A = A + 6 for i in range(4): A = A +", "A + 7 A = A + 8 for i in range(4): A", "range(2): A = A + 5 for i in range(3): A = A", "A + 8 A = A + 5 for i in range(1): A", "A = A + 8 for i in range(3): A = A +", "i in range($1) A = 0 for i in range(2): A = A", "9 for i in range(4): for i in range(1): A = A +", "= A + 6 A = A + 9 A = A +", "A + 2 for i in range(3): A = A + 3 for", "1 for i in range(8): for i in range(1): for i in range(4):", "A + 1 A = A + 4 for i in range(6): A", "1 A = A + 2 for i in range(6): A = A", "range(4): for i in range(7): A = A + 1 for i in", "range(7): for i in range(7): for i in range(5): A = A +", "+ 6 A = A + 9 A = A + 5 for", "range(8): A = A + 7 A = A + 2 A =", "A + 7 A = A + 2 for i in range(5): A", "= A + 6 for i in range(9): A = A + 1", "+ 5 for i in range(6): A = A + 7 for i", "= A + 2 for i in range(6): for i in range(1): A", "5 A = A + 5 A = A + 1 A =", "A + 5 for i in range(3): A = A + 5 A", "= A + 6 A = A + 6 for i in range(1):", "5 for i in range(1): A = A + 5 A = A", "3 A = A + 9 for i in range(4): for i in", "A + 4 A = A + 8 for i in range(4): A", "+ 8 for i in range(3): A = A + 7 A =", "A + 6 A = A + 8 A = A + 9", "A = A + 7 for i in range(8): A = A +", "A + 8 A = A + 8 for i in range(3): A", "A = A + 4 for i in range(4): for i in range(4):", "= A + 4 A = A + 7 for i in range(1):", "A + 3 for i in range(5): A = A + 2 A", "= A + 7 for i in range(9): for i in range(6): 
A", "+ 3 A = A + 6 A = A + 8 for", "A = A + 5 A = A + 4 for i in", "in range(3): A = A + 6 for i in range(5): A =", "= A + 7 A = A + 6 A = A +", "in range(1): A = A + 6 A = A + 6 A", "A + 6 A = A + 6 for i in range(9): A", "+ 2 A = A + 5 A = A + 2 A", "A = A + 1 for i in range(1): for i in range(5):", "for i in range(1): A = A + 1 for i in range(8):", "+ 7 for i in range(3): A = A + 4 A =", "i in range(7): A = A + 8 A = A + 3", "in range(1): A = A + 4 A = A + 4 for", "+ 3 A = A + 9 A = A + 5 A", "A = A + 2 A = A + 1 for i in", "4 A = A + 1 A = A + 7 A =", "in range(7): for i in range(4): A = A + 6 A =", "in range(3): A = A + 9 A = A + 1 for", "in range(6): A = A + 4 A = A + 4 A", "A + 3 A = A + 9 for i in range(7): A", "= A + 5 for i in range(4): A = A + 2", "= A + 8 for i in range(3): A = A + 2", "i in range(5): A = A + 8 A = A + 8", "8 A = A + 4 for i in range(8): for i in", "for i in range(9): for i in range(6): A = A + 4", "in range(7): A = A + 3 A = A + 3 A", "A + 5 for i in range(7): A = A + 5 for", "= A + 6 for i in range(9): A = A + 3", "= A + 2 for i in range(9): for i in range(3): A", "in range(7): A = A + 8 A = A + 3 for", "A + 8 A = A + 7 A = A + 2", "+ 2 A = A + 7 A = A + 2 for", "2 A = A + 4 for i in range(5): for i in", "2 A = A + 5 for i in range(4): A = A", "A = A + 5 A = A + 3 for i in", "for i in range(3): for i in range(2): A = A + 1", "+ 9 A = A + 6 A = A + 9 for", "A + 6 A = A + 2 for i in range(1): A", "i in range(8): A = A + 5 A = A + 6", "+ 9 A = A + 3 for i in range(9): A =", "in range(2): A = A + 4 for i in range(5): for i", "7 A = A + 8 A = A + 4 A =", "# REPEAT (\\d) -> for i in range($1) A = 0 for i", "A + 7 for i in range(1): A = A + 7 for", "+ 3 A = A + 7 for i in range(5): A =", "in range(2): A = A + 7 A = A + 7 for", "range(8): A = A + 9 A = A + 8 for i", "i in range(4): for i in range(1): A = A + 6 A", "9 A = A + 1 A = A + 3 for i", "+ 2 A = A + 9 A = A + 6 A", "5 for i in range(6): A = A + 7 for i in", "+ 7 A = A + 9 A = A + 7 
for", "+ 8 for i in range(2): A = A + 7 A =", "range(4): A = A + 5 A = A + 7 for i", "i in range(4): A = A + 5 A = A + 7", "4 A = A + 4 A = A + 8 for i", "i in range(4): for i in range(4): A = A + 8 for", "A + 7 for i in range(5): A = A + 9 A", "in range(9): for i in range(5): A = A + 6 A =", "+ 4 A = A + 5 A = A + 8 for", "4 for i in range(7): A = A + 8 A = A", "= A + 3 A = A + 1 A = A +", "= A + 9 for i in range(8): A = A + 9", "in range(7): A = A + 8 for i in range(6): A =", "in range(9): A = A + 3 A = A + 6 for", "A = A + 9 for i in range(6): A = A +", "+ 8 A = A + 7 A = A + 5 A", "i in range(8): A = A + 9 A = A + 1", "+ 7 for i in range(1): A = A + 7 for i", "= A + 1 A = A + 7 A = A +", "A + 9 A = A + 6 A = A + 2", "= A + 3 for i in range(2): for i in range(8): A", "in range(4): for i in range(4): A = A + 1 A =", "6 for i in range(3): A = A + 3 A = A", "in range(2): A = A + 6 for i in range(4): A =", "range(4): A = A + 1 A = A + 2 for i", "2 A = A + 6 A = A + 6 for i", "A + 9 A = A + 8 for i in range(4): A", "+ 5 A = A + 1 for i in range(8): A =", "+ 2 A = A + 1 for i in range(1): A =", "+ 6 for i in range(3): for i in range(4): A = A", "A + 9 A = A + 1 A = A + 9", "A + 3 for i in range(2): for i in range(8): A =", "in range(7): A = A + 1 for i in range(2): A =", "A + 2 A = A + 3 A = A + 7", "i in range(8): A = A + 9 for i in range(5): A", "i in range(4): for i in range(9): A = A + 2 for", "A + 7 A = A + 7 A = A + 8", "i in range(1): A = A + 1 for i in range(6): A", "in range(2): A = A + 6 A = A + 7 A", "A + 8 for i in range(4): A = A + 2 for", "A + 9 A = A + 4 for i in range(2): A", "= A + 7 A = A + 4 A = A +", "in range(5): A = A + 5 for i in range(1): A =", "A + 9 for i in range(8): A = A + 7 for", "A + 3 for i in range(6): for i in range(1): A =", "= A + 2 A = A + 4 for i in range(6):", "range(9): A = A + 1 A = A + 9 A =", "A + 8 A = A + 3 A = A + 3", "A = A + 8 for i in range(9): A = A +", "7 for i in range(5): A = A + 1 A = A", "= A + 4 for i in 
range(9): for i in range(3): A", "6 for i in range(3): A = A + 7 for i in", "+ 3 for i in range(7): A = A + 3 A =", "A = A + 5 for i in range(8): A = A +", "for i in range(1): for i in range(2): A = A + 2", "A + 6 for i in range(5): A = A + 8 A", "= A + 7 for i in range(3): A = A + 9", "i in range(7): A = A + 2 for i in range(4): A", "6 A = A + 2 for i in range(3): A = A", "6 A = A + 2 for i in range(1): A = A", "A = A + 2 for i in range(6): A = A +", "= A + 3 for i in range(9): A = A + 1", "4 A = A + 2 A = A + 3 A =", "A = A + 8 A = A + 9 A = A", "A + 2 for i in range(6): A = A + 3 for", "A = A + 2 A = A + 2 A = A", "range(2): A = A + 9 A = A + 4 A =", "in range(1): A = A + 4 for i in range(5): A =", "= A + 4 A = A + 1 for i in range(9):", "for i in range(1): A = A + 7 for i in range(4):", "for i in range(2): A = A + 5 A = A +", "A + 5 A = A + 5 for i in range(5): A", "= A + 9 A = A + 2 A = A +", "in range(3): A = A + 7 A = A + 9 A", "in range(1): for i in range(2): A = A + 2 for i", "i in range(4): A = A + 1 A = A + 2", "in range(6): for i in range(1): A = A + 5 A =", "i in range(8): for i in range(4): for i in range(8): A =", "= A + 3 A = A + 8 for i in range(7):", "A + 6 for i in range(1): for i in range(2): A =", "= A + 5 A = A + 4 for i in range(8):", "A + 2 A = A + 5 for i in range(4): A", "6 A = A + 1 A = A + 2 A =", "8 A = A + 9 A = A + 5 for i", "range(6): A = A + 3 for i in range(2): A = A", "= A + 5 for i in range(1): A = A + 8", "A + 3 for i in range(5): A = A + 5 A", "A + 2 A = A + 5 for i in range(1): A", "A = A + 2 A = A + 4 for i in", "7 A = A + 9 A = A + 2 for i", "+ 3 for i in range(5): A = A + 2 A =", "+ 7 A = A + 7 for i in range(7): for i", "A = A + 7 for i in range(4): A = A +", "+ 4 for i in range(6): A = A + 2 for i", "in range(2): A = A + 1 A = A + 7 A", "= A + 1 for i in range(6): A = A + 2", "i in range(5): A = A + 6 A = A + 2", "+ 5 A = A + 1 A = A + 1 A", "= A + 3 for i in range(5): for i in range(1): A", "+ 3 for i in range(9): A = A 
+ 2 A =", "= A + 3 A = A + 7 for i in range(5):", "4 A = A + 1 A = A + 5 A =", "in range(2): A = A + 1 A = A + 5 for", "7 A = A + 1 for i in range(9): for i in", "= A + 4 A = A + 8 A = A +", "7 A = A + 6 A = A + 9 A =", "= A + 8 for i in range(6): A = A + 5", "A + 7 A = A + 1 for i in range(2): A", "= A + 7 A = A + 3 A = A +", "range(9): A = A + 4 A = A + 3 for i", "in range(6): for i in range(2): A = A + 1 A =", "A + 4 A = A + 3 A = A + 7", "A + 7 A = A + 1 A = A + 9", "range(6): A = A + 7 A = A + 7 A =", "+ 2 A = A + 5 A = A + 3 A", "+ 1 for i in range(2): A = A + 3 for i", "A + 6 A = A + 8 A = A + 8", "for i in range(5): A = A + 8 for i in range(3):", "in range(4): for i in range(4): A = A + 8 for i", "A = A + 7 for i in range(9): for i in range(6):", "A + 6 for i in range(9): A = A + 4 A", "A + 8 A = A + 5 A = A + 9", "3 A = A + 6 A = A + 8 for i", "6 A = A + 9 A = A + 2 for i", "+ 2 A = A + 3 for i in range(2): for i", "+ 2 for i in range(7): for i in range(8): for i in", "in range(7): A = A + 5 A = A + 2 for", "in range(2): A = A + 4 for i in range(8): A =", "A + 5 A = A + 7 A = A + 8", "A + 2 A = A + 4 for i in range(5): for", "1 for i in range(6): A = A + 4 A = A", "9 A = A + 1 for i in range(8): A = A", "i in range(9): for i in range(3): A = A + 7 A", "range(6): A = A + 4 A = A + 5 for i", "i in range(9): A = A + 1 A = A + 6", "+ 2 for i in range(2): A = A + 7 A =", "A + 8 A = A + 3 for i in range(5): A", "= A + 8 for i in range(8): A = A + 5", "= A + 2 for i in range(1): A = A + 2", "A + 2 A = A + 7 A = A + 2", "A = A + 2 A = A + 5 A = A", "i in range(7): for i in range(8): for i in range(6): A =", "= A + 5 for i in range(3): A = A + 1", "= A + 9 A = A + 7 for i in range(3):", "A + 7 for i in range(2): for i in range(7): A =", "+ 8 for i in range(6): A = A + 5 for i", "range(9): for i in range(5): A = A + 6 A = A", "6 A = A + 3 for i in range(7): A = A", "in range(5): A = A + 5 A = A + 8 A", "= A + 4 A = A + 6 for i in range(3):", "in range(4): A = A + 5 
A = A + 7 for", "A + 5 for i in range(9): A = A + 1 A", "+ 2 A = A + 6 A = A + 1 A", "A + 4 for i in range(3): A = A + 7 A", "8 for i in range(3): A = A + 2 A = A", "+ 3 for i in range(4): A = A + 5 A =", "= A + 9 for i in range(6): A = A + 9", "A + 1 for i in range(9): A = A + 9 A", "+ 4 for i in range(5): for i in range(3): for i in", "range(2): A = A + 4 for i in range(5): for i in", "range(1): A = A + 2 A = A + 5 for i", "+ 1 for i in range(8): for i in range(1): for i in", "i in range(3): A = A + 7 A = A + 7", "A + 1 A = A + 2 A = A + 6", "+ 3 A = A + 9 A = A + 1 A", "+ 1 A = A + 8 A = A + 2 for", "= A + 6 for i in range(7): for i in range(6): for", "7 for i in range(3): A = A + 4 A = A", "8 for i in range(2): A = A + 9 A = A", "= A + 4 A = A + 1 A = A +", "A + 7 for i in range(5): A = A + 6 A", "range(5): A = A + 6 A = A + 5 for i", "i in range(6): A = A + 3 for i in range(4): A", "A = A + 8 for i in range(6): A = A +", "1 A = A + 8 for i in range(7): A = A", "+ 2 A = A + 7 A = A + 4 for", "in range(6): A = A + 8 for i in range(9): A =", "= A + 8 A = A + 2 for i in range(9):", "+ 5 for i in range(9): for i in range(3): A = A", "A + 6 A = A + 2 for i in range(9): for", "6 A = A + 4 for i in range(9): A = A", "range(5): A = A + 3 A = A + 9 A =", "3 for i in range(5): A = A + 9 for i in", "A + 2 for i in range(5): A = A + 8 for", "for i in range(7): for i in range(4): A = A + 6", "A + 8 for i in range(8): for i in range(4): for i", "+ 2 A = A + 5 for i in range(4): A =", "+ 7 for i in range(5): A = A + 1 A =", "range(1): A = A + 3 A = A + 9 A =", "+ 3 A = A + 3 A = A + 3 A", "in range(3): A = A + 7 A = A + 7 A", "3 for i in range(2): A = A + 7 A = A", "range(6): A = A + 2 for i in range(5): A = A", "+ 3 A = A + 6 for i in range(9): A =", "+ 5 A = A + 8 A = A + 7 A", "+ 9 A = A + 5 A = A + 6 A", "= A + 8 A = A + 5 for i in range(1):", "A + 7 A = A + 1 A = A + 4", "+ 5 A = A + 7 A = A + 7 A", "i in range(6): A = A + 4 A = A + 9", "in range(6): A = A + 8 A = A + 1 
A", "in range(9): A = A + 1 A = A + 9 A", "+ 3 A = A + 9 for i in range(4): for i", "9 A = A + 1 A = A + 9 A =", "in range(7): for i in range(5): A = A + 7 for i", "range(4): A = A + 2 A = A + 7 A =", "6 A = A + 5 A = A + 5 A =", "+ 1 for i in range(6): A = A + 8 A =", "8 A = A + 3 for i in range(8): A = A", "= A + 4 for i in range(8): A = A + 2", "range(1): for i in range(7): for i in range(2): A = A +", "A + 5 A = A + 5 A = A + 4", "i in range(3): A = A + 4 A = A + 3", "+ 3 A = A + 3 A = A + 6 for", "for i in range(2): A = A + 4 for i in range(8):", "2 A = A + 1 for i in range(1): A = A", "6 A = A + 6 A = A + 2 for i", "+ 5 for i in range(4): A = A + 2 A =", "= A + 5 for i in range(7): A = A + 3", "+ 8 A = A + 5 for i in range(2): A =", "+ 8 A = A + 8 A = A + 7 A", "A + 2 A = A + 8 A = A + 2", "A + 1 for i in range(7): A = A + 7 for", "i in range(4): A = A + 5 A = A + 4", "4 for i in range(3): A = A + 5 A = A", "+ 6 for i in range(9): A = A + 4 A =", "9 A = A + 2 for i in range(2): A = A", "+ 3 for i in range(2): A = A + 4 for i", "+ 3 for i in range(4): A = A + 4 A =", "5 A = A + 3 A = A + 6 A =", "+ 8 for i in range(7): A = A + 5 A =", "9 A = A + 6 for i in range(9): A = A", "= A + 5 A = A + 9 for i in range(8):", "7 A = A + 4 A = A + 2 A =", "i in range(8): A = A + 4 A = A + 7", "for i in range(1): A = A + 4 for i in range(3):", "= A + 9 A = A + 5 for i in range(9):", "A = A + 7 A = A + 8 for i in", "i in range(7): for i in range(4): A = A + 8 A", "A = A + 4 for i in range(2): A = A +", "A + 8 for i in range(2): A = A + 9 A", "+ 5 A = A + 5 A = A + 6 A", "A + 9 A = A + 3 for i in range(1): for", "+ 9 A = A + 3 A = A + 2 for", "i in range(9): for i in range(7): A = A + 5 for", "A = A + 4 for i in range(5): for i in range(6):", "i in range(5): A = A + 6 for i in range(1): A", "+ 8 for i in range(8): A = A + 5 for i", "i in range(5): A = A + 9 for i in range(8): A", "in range(1): A = A + 8 for i in range(8): for i", "7 A = A + 9 A = A + 6 for i", "A + 5 for i in 
range(4): A = A + 7 A", "= A + 6 for i in range(5): for i in range(6): A", "range(7): A = A + 5 for i in range(2): A = A", "8 for i in range(9): A = A + 8 A = A", "A + 5 for i in range(9): A = A + 5 A", "A + 8 A = A + 6 for i in range(3): for", "+ 6 A = A + 7 A = A + 6 A", "range(6): A = A + 9 A = A + 1 for i", "5 for i in range(1): A = A + 4 for i in", "i in range(8): A = A + 5 for i in range(1): A", "= A + 1 for i in range(1): for i in range(5): A", "A + 6 A = A + 9 A = A + 5", "+ 3 for i in range(5): for i in range(1): A = A", "A + 2 A = A + 6 A = A + 8", "range(3): A = A + 7 A = A + 9 A =", "= A + 1 for i in range(8): A = A + 4", "A + 9 A = A + 1 A = A + 2", "7 for i in range(3): A = A + 6 for i in", "range(1): A = A + 5 A = A + 2 A =", "i in range(2): for i in range(6): A = A + 9 A", "i in range(4): A = A + 7 A = A + 9", "5 A = A + 5 for i in range(4): A = A", "A + 3 for i in range(6): for i in range(2): A =", "for i in range(9): A = A + 5 for i in range(1):", "A + 2 for i in range(7): A = A + 5 A", "= A + 4 A = A + 4 for i in range(8):", "= A + 8 A = A + 5 for i in range(2):", "range(6): A = A + 9 A = A + 3 for i", "7 for i in range(5): A = A + 1 for i in", "+ 5 for i in range(8): A = A + 6 for i", "i in range(8): A = A + 8 A = A + 1", "+ 7 A = A + 7 for i in range(5): A =", "A + 4 A = A + 7 for i in range(2): for", "range(3): A = A + 4 for i in range(7): A = A", "A = A + 1 A = A + 4 A = A", "A + 9 A = A + 7 for i in range(9): A", "range(4): A = A + 7 A = A + 7 A =", "A = A + 7 for i in range(7): A = A +", "+ 6 A = A + 5 A = A + 9 for", "A = A + 1 for i in range(5): A = A +", "range(7): A = A + 8 for i in range(6): A = A", "+ 8 A = A + 7 A = A + 1 A", "A + 1 A = A + 6 A = A + 5", "i in range(1): A = A + 8 A = A + 4", "range(4): A = A + 6 A = A + 6 for i", "A + 5 for i in range(6): A = A + 7 for", "A + 7 for i in range(3): A = A + 6 for", "A + 6 for i in range(3): A = A + 3 A", "= A + 2 A = A + 8 for i in range(4):", "A + 7 A = A + 1 for i in range(9): for", 
"6 A = A + 2 for i in range(5): A = A", "A + 6 A = A + 6 A = A + 4", "i in range(5): A = A + 6 A = A + 9", "+ 5 for i in range(3): A = A + 3 A =", "i in range(5): for i in range(3): for i in range(6): A =", "+ 9 for i in range(5): A = A + 2 A =", "= A + 9 A = A + 3 A = A +", "i in range(3): A = A + 2 A = A + 9", "A = A + 7 for i in range(9): A = A +", "for i in range(2): A = A + 6 for i in range(4):", "for i in range(5): A = A + 5 for i in range(1):", "i in range(5): A = A + 5 A = A + 4", "+ 2 A = A + 8 A = A + 3 for", "in range(7): A = A + 3 A = A + 9 A", "+ 5 A = A + 6 for i in range(2): A =", "in range(5): A = A + 6 for i in range(1): A =", "A = A + 3 for i in range(8): for i in range(8):", "range(5): for i in range(1): A = A + 4 A = A", "6 A = A + 4 for i in range(3): A = A", "A + 4 A = A + 1 A = A + 5", "= A + 2 A = A + 1 for i in range(9):", "i in range(8): A = A + 6 for i in range(4): A", "for i in range(9): A = A + 3 A = A +", "7 A = A + 8 A = A + 5 A =", "A + 8 for i in range(2): A = A + 6 for", "A + 2 for i in range(4): A = A + 7 A", "+ 3 for i in range(1): A = A + 2 A =", "= A + 6 for i in range(2): A = A + 1", "A + 9 A = A + 2 for i in range(1): A", "range(2): for i in range(7): A = A + 3 A = A", "= A + 2 for i in range(2): A = A + 4", "in range(6): A = A + 2 for i in range(5): A =", "i in range(2): A = A + 3 A = A + 2", "= A + 1 A = A + 8 A = A +", "A + 4 A = A + 6 for i in range(7): A", "= A + 2 A = A + 5 for i in range(8):", "in range(8): A = A + 9 A = A + 6 A", "+ 3 A = A + 9 for i in range(7): A =", "A + 1 A = A + 8 A = A + 2", "in range(4): A = A + 5 A = A + 4 for", "range(5): for i in range(6): A = A + 5 A = A", "A + 3 for i in range(2): A = A + 7 A", "= A + 9 A = A + 7 for i in range(5):", "in range(1): A = A + 5 A = A + 7 A", "+ 2 A = A + 1 for i in range(8): for i", "range(5): A = A + 9 for i in range(8): A = A", "+ 3 A = A + 5 A = A + 1 for", "A + 3 for i in range(3): A = A + 4 A", "= A + 5 for i in range(9): A = A + 5", "+ 1 A = A + 1 A = A 
+ 3 A", "+ 1 for i in range(1): A = A + 4 A =", "= A + 4 A = A + 4 for i in range(3):", "= A + 2 for i in range(5): A = A + 9", "A + 4 for i in range(6): A = A + 6 A", "2 A = A + 4 for i in range(6): A = A", "for i in range(8): for i in range(4): for i in range(8): A", "A + 8 A = A + 4 for i in range(5): for", "1 A = A + 7 A = A + 7 A =", "9 for i in range(2): for i in range(6): A = A +", "A = A + 6 for i in range(5): for i in range(6):", "in range(5): A = A + 3 A = A + 6 for", "A = A + 7 A = A + 2 for i in", "range(3): A = A + 5 A = A + 5 A =", "= A + 4 for i in range(2): A = A + 7", "+ 4 A = A + 2 A = A + 4 for", "= A + 1 A = A + 2 for i in range(4):", "A = A + 7 for i in range(1): for i in range(1):", "A + 3 A = A + 2 A = A + 6", "in range(6): A = A + 9 A = A + 2 A", "6 A = A + 8 A = A + 8 A =", "= A + 8 for i in range(1): A = A + 2", "i in range(9): A = A + 3 A = A + 4", "7 for i in range(5): A = A + 9 A = A", "= A + 4 for i in range(5): for i in range(6): A", "A + 4 for i in range(1): for i in range(7): for i", "A + 2 for i in range(1): A = A + 2 A", "+ 6 for i in range(6): A = A + 9 A =", "i in range(1): for i in range(7): for i in range(2): A =", "range(8): A = A + 5 A = A + 6 for i", "2 A = A + 7 for i in range(4): A = A", "A + 9 A = A + 3 for i in range(2): A", "+ 2 A = A + 8 A = A + 7 A", "= A + 7 for i in range(6): A = A + 7", "A + 4 A = A + 2 A = A + 4", "3 A = A + 5 A = A + 1 for i", "A = A + 3 for i in range(4): A = A +", "in range(6): for i in range(1): A = A + 3 for i", "= A + 7 for i in range(5): A = A + 9", "A + 2 A = A + 4 A = A + 3", "1 A = A + 5 for i in range(3): A = A", "+ 9 A = A + 8 for i in range(5): A =", "= A + 7 A = A + 4 for i in range(8):", "range(7): for i in range(4): A = A + 8 A = A", "A + 3 A = A + 3 A = A + 6", "5 A = A + 5 for i in range(5): A = A", "for i in range(9): for i in range(5): A = A + 9", "A = A + 8 for i in range(4): for i in range(4):", "6 A = A + 4 A = A + 9 A =", "i in range(5): for i in range(4): A = A + 5 A", "A 
+ 8 A = A + 1 A = A + 4", "= A + 3 for i in range(3): A = A + 3", "A + 7 A = A + 2 for i in range(7): for", "= A + 7 A = A + 8 A = A +", "+ 2 for i in range(1): A = A + 6 A =", "5 for i in range(1): A = A + 2 for i in", "A + 8 A = A + 6 A = A + 1", "A + 7 A = A + 2 A = A + 1", "A = A + 2 for i in range(3): A = A +", "6 for i in range(5): A = A + 3 A = A", "A + 5 for i in range(4): A = A + 5 A", "A = A + 7 A = A + 4 A = A", "range(7): A = A + 8 for i in range(7): A = A", "A = A + 3 for i in range(8): A = A +", "6 for i in range(6): A = A + 9 A = A", "= A + 6 for i in range(1): A = A + 4", "A + 1 A = A + 2 A = A + 8", "9 A = A + 4 for i in range(2): A = A", "= A + 9 A = A + 4 A = A +", "i in range(7): for i in range(5): A = A + 7 for", "+ 8 A = A + 3 A = A + 9 A", "= A + 4 for i in range(8): for i in range(7): A", "5 A = A + 1 for i in range(8): A = A", "i in range(3): for i in range(6): A = A + 8 A", "i in range(7): A = A + 3 A = A + 6", "A = A + 1 A = A + 2 A = A", "9 A = A + 5 for i in range(2): A = A", "range(6): A = A + 3 for i in range(4): A = A", "6 for i in range(4): A = A + 8 A = A", "range(7): for i in range(6): for i in range(9): A = A +", "3 for i in range(3): A = A + 3 A = A", "for i in range(9): for i in range(7): A = A + 5", "+ 6 for i in range(1): A = A + 3 A =", "range(8): for i in range(1): A = A + 7 A = A", "range(6): A = A + 8 A = A + 4 for i", "for i in range(2): A = A + 3 for i in range(5):", "in range(1): A = A + 6 A = A + 8 for", "+ 5 A = A + 2 A = A + 6 A", "A + 2 for i in range(6): for i in range(1): A =", "= A + 3 A = A + 5 for i in range(7):", "= A + 7 for i in range(1): for i in range(1): for", "for i in range(9): for i in range(5): A = A + 6", "for i in range(2): A = A + 3 A = A +", "range(8): A = A + 4 for i in range(3): A = A", "= A + 3 A = A + 2 for i in range(6):", "9 A = A + 6 A = A + 9 for i", "i in range(5): A = A + 8 A = A + 7", "i in range(8): for i in range(6): A = A + 4 A", "range(4): for i in range(1): A = A + 6 A = A", "= A 
+ 9 A = A + 3 for i in range(9):", "7 for i in range(2): for i in range(7): A = A +", "i in range(6): A = A + 4 A = A + 5", "range(2): A = A + 1 A = A + 5 for i", "A = A + 7 A = A + 1 A = A", "4 A = A + 7 A = A + 6 A =", "+ 5 A = A + 6 for i in range(1): A =", "2 for i in range(1): A = A + 4 for i in", "3 for i in range(6): for i in range(1): A = A +", "in range(9): A = A + 9 A = A + 5 A", "+ 2 for i in range(3): A = A + 3 for i", "4 for i in range(6): A = A + 6 for i in", "5 A = A + 5 for i in range(2): A = A", "+ 6 for i in range(4): A = A + 8 A =", "range(5): A = A + 3 for i in range(6): for i in", "for i in range(1): A = A + 1 for i in range(6):", "+ 7 A = A + 2 for i in range(7): for i", "i in range(7): A = A + 2 A = A + 2", "+ 1 A = A + 8 A = A + 7 A", "+ 1 A = A + 9 for i in range(3): A =", "= A + 4 for i in range(3): for i in range(2): A", "+ 1 A = A + 4 A = A + 1 A", "= A + 9 for i in range(1): A = A + 6", "A = A + 9 for i in range(3): A = A +", "i in range(3): A = A + 1 A = A + 8", "9 A = A + 8 A = A + 3 A =", "for i in range(9): A = A + 4 A = A +", "2 A = A + 5 for i in range(9): A = A", "A + 3 A = A + 3 A = A + 3", "8 for i in range(7): A = A + 7 A = A", "range(3): A = A + 4 A = A + 3 A =", "A = A + 6 for i in range(4): for i in range(2):", "range(7): A = A + 3 for i in range(5): A = A", "A + 2 A = A + 5 for i in range(9): A", "A + 3 for i in range(8): for i in range(8): A =", "range(7): A = A + 2 A = A + 2 A =", "in range(2): A = A + 2 A = A + 9 A", "+ 9 A = A + 2 for i in range(8): A =", "9 A = A + 5 A = A + 3 for i", "A + 7 for i in range(5): for i in range(4): A =", "A = A + 6 A = A + 4 for i in", "A + 6 A = A + 5 for i in range(1): for", "A + 5 A = A + 8 A = A + 2", "A = A + 6 A = A + 6 A = A", "+ 9 A = A + 8 A = A + 3 A", "A + 5 for i in range(3): for i in range(5): A =", "A + 6 A = A + 9 A = A + 2", "i in range(3): A = A + 5 A = A + 3", "3 for i in range(5): A = A + 3 for i in", "in range(6): A = A + 6 A = A + 1 A", "in range(2): A = A + 7 for i 
in range(1): A =", "A + 4 A = A + 4 for i in range(8): A", "in range(2): A = A + 4 A = A + 9 A", "range(9): A = A + 1 A = A + 6 for i", "for i in range(1): A = A + 2 for i in range(5):", "7 A = A + 3 for i in range(2): A = A", "= A + 6 for i in range(5): A = A + 3", "1 for i in range(1): A = A + 4 A = A", "= A + 7 for i in range(7): A = A + 2", "i in range(3): A = A + 9 A = A + 1", "= A + 2 for i in range(7): for i in range(8): for", "A + 8 A = A + 8 A = A + 7", "8 A = A + 5 for i in range(2): A = A", "i in range(9): A = A + 9 A = A + 8", "A = A + 3 for i in range(7): A = A +", "A + 5 A = A + 9 for i in range(2): for", "for i in range(4): for i in range(2): A = A + 3", "in range(7): for i in range(6): for i in range(9): A = A", "5 A = A + 4 for i in range(8): A = A", "A + 8 for i in range(8): for i in range(5): A =", "+ 5 for i in range(2): A = A + 5 A =", "range(9): A = A + 9 A = A + 2 for i", "+ 3 A = A + 7 for i in range(8): for i", "for i in range(8): A = A + 5 for i in range(8):", "i in range(7): A = A + 8 for i in range(7): A", "A + 2 for i in range(6): A = A + 9 A", "range(2): A = A + 4 A = A + 1 A =", "A = A + 7 A = A + 2 A = A", "3 for i in range(9): A = A + 9 A = A", "+ 5 A = A + 4 A = A + 2 A", "= A + 3 for i in range(9): A = A + 2", "= A + 1 A = A + 1 A = A +", "i in range(8): for i in range(1): for i in range(4): A =", "A + 7 for i in range(8): A = A + 6 A", "A + 2 for i in range(2): A = A + 6 for", "i in range(6): A = A + 5 A = A + 1", "A = A + 4 for i in range(6): A = A +", "i in range(6): A = A + 3 for i in range(2): A", "+ 4 for i in range(2): A = A + 3 A =", "in range(4): A = A + 6 for i in range(3): A =", "+ 6 A = A + 5 for i in range(1): for i", "+ 5 A = A + 9 for i in range(2): for i", "for i in range(3): A = A + 7 for i in range(3):", "i in range(3): A = A + 6 for i in range(9): A", "= A + 3 for i in range(8): A = A + 7", "A + 2 for i in range(7): for i in range(8): for i", "range(6): A = A + 7 for i in range(7): A = A", "A = A + 4 A = A + 2 A 
= A", "+ 8 A = A + 6 A = A + 1 A", "A + 3 for i in range(9): A = A + 9 A", "2 A = A + 5 for i in range(1): A = A", "A + 7 A = A + 1 for i in range(6): A", "i in range(5): for i in range(6): A = A + 8 for", "for i in range(8): for i in range(8): A = A + 4", "in range(5): A = A + 4 A = A + 8 A", "3 A = A + 3 for i in range(5): A = A", "= A + 5 for i in range(2): A = A + 1", "in range(2): A = A + 3 A = A + 5 A", "+ 1 for i in range(9): for i in range(7): A = A", "for i in range(9): A = A + 9 A = A +", "A = A + 3 for i in range(3): A = A +", "3 A = A + 1 A = A + 2 A =", "+ 4 for i in range(9): for i in range(3): A = A", "range(6): A = A + 2 A = A + 8 A =", "A = A + 1 for i in range(1): A = A +", "i in range(2): A = A + 3 A = A + 7", "+ 7 A = A + 9 A = A + 6 for", "range(6): A = A + 9 for i in range(5): A = A", "1 for i in range(1): A = A + 1 for i in", "+ 7 A = A + 9 A = A + 8 A", "A + 9 A = A + 5 A = A + 9", "4 for i in range(2): A = A + 7 for i in", "4 for i in range(9): A = A + 3 A = A", "= A + 2 A = A + 4 A = A +", "i in range(1): A = A + 5 for i in range(7): A", "for i in range(9): A = A + 6 for i in range(1):", "+ 2 A = A + 4 A = A + 3 for", "A = A + 6 A = A + 6 for i in", "+ 7 A = A + 2 A = A + 1 A", "+ 4 A = A + 1 A = A + 5 A", "1 A = A + 5 A = A + 2 A =", "A + 1 for i in range(8): A = A + 4 A", "range(1): A = A + 6 A = A + 8 for i", "A + 5 A = A + 8 for i in range(8): A", "A + 8 A = A + 7 A = A + 4", "= A + 4 for i in range(3): A = A + 7", "= A + 6 for i in range(2): A = A + 4", "A = A + 1 A = A + 3 for i in", "range(1): A = A + 4 A = A + 2 for i", "A + 3 for i in range(5): for i in range(1): A =", "for i in range(3): A = A + 2 A = A +", "A + 2 A = A + 4 for i in range(6): A", "i in range(3): A = A + 6 for i in range(5): A", "A + 5 for i in range(5): A = A + 5 for", "range(8): A = A + 9 for i in range(9): for i in", "4 A = A + 2 for i in range(9): for i in", "+ 1 A = A + 9 A = A + 8 for", "+ 6 for i in range(9): A = A + 3 for i", "2 for i in range(2): A = A + 
3 for i in", "9 A = A + 5 for i in range(3): A = A", "range(3): A = A + 3 A = A + 7 for i", "A + 2 A = A + 7 A = A + 5", "+ 6 for i in range(2): A = A + 1 A =", "1 A = A + 8 A = A + 7 A =", "+ 6 for i in range(2): A = A + 3 A =", "9 A = A + 3 for i in range(9): A = A", "range(7): for i in range(8): for i in range(6): A = A +", "in range(3): A = A + 3 A = A + 9 A", "A = A + 6 A = A + 8 for i in", "A = A + 6 for i in range(6): A = A +", "1 for i in range(2): A = A + 3 for i in", "for i in range(4): A = A + 6 for i in range(3):", "6 for i in range(9): A = A + 3 A = A", "A + 8 A = A + 3 for i in range(8): A", "7 A = A + 6 A = A + 7 for i", "3 A = A + 7 for i in range(5): A = A", "A + 8 A = A + 9 A = A + 3", "in range(5): for i in range(6): A = A + 8 for i", "= A + 4 A = A + 6 for i in range(7):", "= A + 6 A = A + 3 for i in range(5):", "in range(5): A = A + 1 for i in range(9): for i", "8 A = A + 5 A = A + 1 A =", "A + 8 A = A + 2 A = A + 5", "in range(5): A = A + 9 A = A + 4 for", "+ 9 A = A + 4 A = A + 6 for", "A = A + 2 for i in range(8): for i in range(6):", "= A + 6 A = A + 5 for i in range(7):", "in range(4): A = A + 2 A = A + 1 for", "+ 9 for i in range(5): A = A + 5 for i", "range(7): A = A + 4 for i in range(6): A = A", "7 A = A + 4 for i in range(4): for i in", "in range(5): A = A + 9 A = A + 7 for", "+ 3 A = A + 9 A = A + 7 for", "= A + 5 for i in range(3): A = A + 7", "+ 3 for i in range(3): A = A + 3 A =", "range(7): A = A + 5 A = A + 2 for i", "= A + 9 A = A + 1 for i in range(8):", "for i in range(8): A = A + 8 A = A +", "4 A = A + 7 for i in range(2): for i in", "in range(1): A = A + 3 for i in range(1): A =", "3 A = A + 6 for i in range(5): A = A", "8 for i in range(9): A = A + 5 A = A", "= A + 2 A = A + 7 for i in range(4):", "range(8): for i in range(7): A = A + 2 for i in", "+ 9 for i in range(5): A = A + 9 A =", "A = A + 9 for i in range(1): A = A +", "+ 4 for i in range(5): for i in range(6): A = A", "for i in range(1): A = A + 4 for i in 
range(5):", "+ 7 A = A + 8 A = A + 5 A", "+ 4 A = A + 3 for i in range(6): for i", "in range(4): for i in range(2): A = A + 3 for i", "for i in range($1) A = 0 for i in range(2): A =", "= A + 3 A = A + 9 A = A +", "8 A = A + 3 A = A + 3 A =", "+ 3 A = A + 2 A = A + 1 A", "= A + 1 A = A + 3 A = A +", "= A + 9 A = A + 6 for i in range(9):", "+ 9 for i in range(4): for i in range(1): A = A", "for i in range(8): A = A + 5 for i in range(1):", "A + 4 for i in range(2): A = A + 9 for", "A = A + 8 for i in range(8): for i in range(4):", "+ 4 for i in range(8): A = A + 9 for i", "A + 9 A = A + 1 A = A + 3", "7 A = A + 7 for i in range(5): A = A", "= A + 1 for i in range(6): A = A + 1", "= A + 3 for i in range(5): for i in range(6): A", "5 A = A + 1 A = A + 1 A =", "for i in range(6): A = A + 7 for i in range(4):", "for i in range(7): A = A + 8 for i in range(6):", "2 A = A + 8 A = A + 3 for i", "A + 7 for i in range(4): A = A + 7 A", "A = A + 9 A = A + 6 A = A", "4 A = A + 1 for i in range(6): A = A", "+ 2 for i in range(1): A = A + 2 A =", "+ 6 A = A + 2 for i in range(9): for i", "+ 7 for i in range(1): A = A + 8 A =", "+ 1 for i in range(1): A = A + 8 A =", "A + 5 A = A + 4 for i in range(9): for", "A = A + 7 A = A + 7 for i in", "A + 4 A = A + 3 A = A + 6", "+ 5 A = A + 5 A = A + 7 A", "A = A + 2 A = A + 5 for i in", "range(9): A = A + 9 A = A + 5 A =", "+ 4 A = A + 8 for i in range(4): A =", "4 for i in range(9): for i in range(3): A = A +", "i in range(2): A = A + 4 A = A + 7", "A = A + 8 A = A + 3 A = A", "i in range(5): A = A + 7 A = A + 5", "A + 8 for i in range(9): A = A + 5 A", "A + 6 for i in range(5): A = A + 9 A", "2 A = A + 5 for i in range(8): for i in", "+ 1 A = A + 9 for i in range(3): for i", "for i in range(1): A = A + 6 A = A +", "in range(6): A = A + 1 for i in range(9): for i", "= A + 4 A = A + 8 for i in range(4):", "1 A = A + 6 for i in range(5): A = A", "in range(3): for i in range(4): A = A + 9 for i", "= A + 1 A = A + 5 for i in range(9):", "for 
i in range(8): for i in range(7): A = A + 2", "= A + 6 for i in range(9): A = A + 4", "range(8): A = A + 5 for i in range(3): A = A", "+ 4 A = A + 1 for i in range(9): A =", "= A + 6 for i in range(1): A = A + 7", "A = A + 8 A = A + 2 A = A", "2 A = A + 8 for i in range(4): for i in", "A + 3 for i in range(5): A = A + 9 for", "for i in range(5): for i in range(6): A = A + 5", "= A + 5 for i in range(1): A = A + 5", "i in range(4): A = A + 9 for i in range(5): A", "A = A + 1 for i in range(8): A = A +", "for i in range(7): for i in range(6): for i in range(9): A", "5 A = A + 8 A = A + 2 A =", "+ 8 A = A + 7 A = A + 9 A", "5 for i in range(1): A = A + 6 for i in", "+ 6 for i in range(5): for i in range(6): A = A", "A = A + 4 A = A + 8 A = A", "+ 1 A = A + 3 for i in range(5): for i", "= A + 7 A = A + 1 A = A +", "= A + 4 A = A + 5 for i in range(9):", "3 A = A + 2 for i in range(6): A = A", "A = A + 4 A = A + 1 A = A", "in range(8): for i in range(4): for i in range(8): A = A", "in range(4): A = A + 9 for i in range(5): A =", "= A + 8 A = A + 4 for i in range(5):", "A + 1 A = A + 5 for i in range(3): A", "for i in range(7): A = A + 2 for i in range(5):", "= A + 1 A = A + 9 A = A +", "+ 5 A = A + 2 for i in range(2): A =", "A + 3 A = A + 7 for i in range(8): for", "= A + 7 for i in range(1): A = A + 8", "A + 7 for i in range(5): A = A + 1 for", "9 for i in range(8): A = A + 9 A = A", "5 A = A + 9 for i in range(8): A = A", "= A + 5 A = A + 2 for i in range(2):", "3 A = A + 1 A = A + 7 A =", "= A + 3 for i in range(8): for i in range(8): A", "+ 7 A = A + 1 for i in range(2): A =", "2 A = A + 2 A = A + 7 for i", "range(3): A = A + 7 A = A + 7 A =", "A = A + 8 for i in range(5): A = A +", "in range(3): A = A + 3 A = A + 7 for", "= A + 5 for i in range(4): A = A + 5", "i in range(5): A = A + 9 A = A + 3", "i in range(9): A = A + 8 for i in range(9): A", "A = A + 5 for i in range(9): for i in range(7):", "+ 4 A = A + 7 for i in range(2): for i", "5 for i in range(2): A = A 
+ 6 for i in", "8 for i in range(2): A = A + 7 A = A", "A + 7 A = A + 4 A = A + 5", "for i in range(3): for i in range(6): A = A + 8", "range(8): A = A + 4 A = A + 2 A =", "A = A + 2 for i in range(8): A = A +", "3 A = A + 9 A = A + 1 A =", "= A + 1 A = A + 6 for i in range(5):", "9 for i in range(6): A = A + 9 A = A", "8 A = A + 6 for i in range(3): for i in", "in range(2): A = A + 4 A = A + 1 A", "= A + 9 A = A + 3 for i in range(2):", "A + 8 A = A + 1 A = A + 6", "A + 4 for i in range(3): A = A + 1 A", "9 A = A + 4 for i in range(9): A = A", "= A + 5 A = A + 6 for i in range(2):", "8 A = A + 4 for i in range(5): for i in", "range(4): A = A + 6 for i in range(3): A = A", "in range(9): A = A + 4 A = A + 9 A", "+ 7 for i in range(5): A = A + 3 for i", "7 A = A + 2 for i in range(5): A = A", "i in range(7): A = A + 1 for i in range(2): A", "in range(8): A = A + 5 A = A + 6 for", "range(4): A = A + 2 A = A + 1 for i", "+ 3 A = A + 4 A = A + 2 A", "= A + 8 A = A + 4 A = A +", "4 for i in range(5): for i in range(3): for i in range(6):", "A + 7 for i in range(7): for i in range(7): for i", "for i in range(4): A = A + 4 A = A +", "= A + 2 for i in range(1): A = A + 4", "+ 7 A = A + 1 for i in range(9): for i", "for i in range(5): A = A + 6 for i in range(1):", "A + 2 for i in range(4): A = A + 6 A", "in range(9): A = A + 2 for i in range(1): A =", "7 for i in range(9): A = A + 6 A = A", "i in range(9): for i in range(3): A = A + 4 for", "A + 9 A = A + 4 A = A + 6", "A + 8 for i in range(2): A = A + 7 for", "i in range(6): A = A + 5 A = A + 5", "range(9): for i in range(3): A = A + 7 A = A", "for i in range(1): A = A + 4 for i in range(2):", "i in range(2): A = A + 9 A = A + 4", "+ 6 for i in range(2): A = A + 2 A =", "+ 4 for i in range(6): A = A + 9 for i", "+ 3 for i in range(2): A = A + 7 A =", "5 A = A + 3 A = A + 9 for i", "range(1): A = A + 4 A = A + 4 for i", "A + 3 A = A + 6 for i in range(5): A", "7 A = A + 9 A = A + 8 A =", "range(1): A = A + 1 for i in 
range(8): A = A", "4 for i in range(2): A = A + 3 A = A", "A + 7 for i in range(1): for i in range(1): for i", "A + 6 A = A + 4 A = A + 6", "range(6): A = A + 8 A = A + 5 for i", "A + 1 A = A + 3 A = A + 8", "in range(2): A = A + 3 A = A + 9 A", "in range(4): A = A + 2 for i in range(2): A =", "= A + 7 for i in range(7): A = A + 3", "in range(6): A = A + 7 A = A + 7 A", "8 for i in range(5): A = A + 6 A = A", "range(3): for i in range(5): A = A + 4 for i in", "in range(3): A = A + 2 A = A + 9 A", "+ 2 A = A + 1 A = A + 4 A", "range(2): A = A + 7 for i in range(1): A = A", "+ 5 A = A + 5 A = A + 6 for", "A + 7 A = A + 5 A = A + 2", "3 for i in range(5): A = A + 5 A = A", "in range(5): for i in range(1): A = A + 4 A =", "= A + 5 A = A + 3 for i in range(3):", "+ 2 for i in range(6): A = A + 3 for i", "2 A = A + 1 A = A + 5 for i", "+ 7 A = A + 7 A = A + 5 A", "1 for i in range(9): for i in range(6): A = A +", "A + 5 A = A + 7 A = A + 7", "+ 9 A = A + 5 for i in range(2): A =", "1 for i in range(8): A = A + 8 A = A", "7 A = A + 3 A = A + 5 for i", "+ 1 for i in range(8): A = A + 8 A =", "4 for i in range(9): A = A + 2 A = A", "range(1): for i in range(4): A = A + 6 A = A", "for i in range(1): A = A + 2 A = A +", "A = A + 6 for i in range(5): A = A +", "+ 2 A = A + 4 for i in range(6): A =", "3 A = A + 9 A = A + 4 A =", "= A + 7 for i in range(5): A = A + 6", "i in range(1): for i in range(5): A = A + 6 A", "= A + 6 for i in range(5): A = A + 9", "i in range(7): for i in range(4): A = A + 6 A", "9 A = A + 5 A = A + 6 A =", "i in range(8): A = A + 4 for i in range(3): A", "range(2): A = A + 4 for i in range(2): A = A", "for i in range(4): A = A + 2 for i in range(2):", "= A + 8 A = A + 8 for i in range(3):", "= A + 2 for i in range(4): A = A + 6", "A + 1 A = A + 7 for i in range(1): A", "= A + 5 for i in range(3): A = A + 6", "i in range(6): for i in range(9): A = A + 7 A", "A = A + 2 for i in range(7): for i in range(8):", "i in range(9): A = A + 6 A = A + 8", "+ 1 A = 
A + 5 A = A + 2 A", "A + 5 for i in range(8): for i in range(6): A =", "+ 8 A = A + 5 A = A + 9 for", "9 A = A + 3 for i in range(2): A = A", "A + 5 A = A + 1 A = A + 1", "i in range(5): A = A + 7 A = A + 2", "range(8): A = A + 9 A = A + 6 A =", "A + 4 for i in range(6): A = A + 2 for", "in range(1): A = A + 4 for i in range(2): A =", "A + 4 A = A + 2 A = A + 8", "+ 5 A = A + 3 for i in range(3): A =", "for i in range(3): A = A + 4 A = A +", "range(8): for i in range(8): A = A + 4 A = A", "REPEAT (\\d) -> for i in range($1) A = 0 for i in", "in range(8): for i in range(1): for i in range(4): A = A", "+ 1 for i in range(8): A = A + 7 A =", "+ 5 A = A + 5 for i in range(5): A =", "6 A = A + 2 for i in range(9): for i in", "= A + 2 for i in range(6): A = A + 9", "A + 4 A = A + 9 for i in range(1): A", "9 A = A + 5 A = A + 5 A =", "+ 8 for i in range(1): A = A + 2 for i", "+ 1 for i in range(9): A = A + 2 A =", "in range(9): A = A + 9 A = A + 2 for", "= A + 4 A = A + 1 for i in range(6):", "= A + 7 A = A + 2 for i in range(3):", "range(1): for i in range(2): A = A + 4 A = A", "A = A + 3 for i in range(1): A = A +", "range(2): A = A + 3 A = A + 5 A =", "A + 7 for i in range(9): A = A + 6 A", "+ 2 for i in range(9): for i in range(3): A = A", "in range(5): A = A + 7 A = A + 2 A", "for i in range(6): A = A + 3 for i in range(4):", "+ 8 for i in range(6): A = A + 4 A =", "for i in range(2): A = A + 7 for i in range(8):", "2 A = A + 6 A = A + 1 A =", "+ 8 A = A + 8 for i in range(3): A =", "+ 4 for i in range(4): for i in range(9): A = A", "for i in range(5): A = A + 1 A = A +", "= A + 4 A = A + 5 for i in range(3):", "in range(9): A = A + 2 A = A + 7 A", "+ 1 A = A + 8 A = A + 3 A", "for i in range(2): A = A + 4 for i in range(5):", "= A + 3 A = A + 6 for i in range(6):", "2 A = A + 9 A = A + 1 A =", "range(9): for i in range(6): A = A + 4 A = A", "+ 5 A = A + 2 for i in range(7): for i", "+ 2 A = A + 5 for i in range(8): for i", "A + 7 A = A + 7 for i in range(7): for", 
"for i in range(7): A = A + 5 for i in range(2):", "-> for i in range($1) A = 0 for i in range(2): A", "in range(7): for i in range(4): A = A + 8 A =", "A = A + 3 A = A + 9 A = A", "A = A + 6 A = A + 5 A = A", "= A + 6 for i in range(2): A = A + 2", "A = A + 9 A = A + 3 A = A", "+ 5 A = A + 1 A = A + 2 A", "9 A = A + 8 for i in range(4): A = A", "+ 6 A = A + 7 for i in range(4): for i", "+ 9 A = A + 5 A = A + 3 A", "1 A = A + 7 A = A + 3 for i", "A + 4 A = A + 2 A = A + 2", "range(6): A = A + 5 for i in range(6): A = A", "3 A = A + 9 A = A + 6 for i", "A = A + 6 A = A + 3 for i in", "in range(6): A = A + 7 for i in range(4): A =", "in range(6): A = A + 5 A = A + 3 A", "7 A = A + 1 A = A + 5 for i", "for i in range(3): A = A + 4 for i in range(7):", "A + 8 for i in range(7): A = A + 4 for", "+ 3 for i in range(5): for i in range(6): A = A", "+ 5 A = A + 8 A = A + 8 A", "A + 6 A = A + 1 A = A + 7", "A + 1 A = A + 6 for i in range(2): A", "range(3): A = A + 7 A = A + 1 for i", "= A + 9 for i in range(4): for i in range(1): A", "= A + 9 A = A + 2 for i in range(1):", "in range(5): A = A + 5 for i in range(3): for i", "1 A = A + 1 A = A + 5 for i", "for i in range(5): for i in range(4): A = A + 5", "i in range(5): A = A + 7 for i in range(5): A", "5 A = A + 7 A = A + 2 for i", "A + 4 A = A + 3 A = A + 3", "i in range(7): A = A + 5 A = A + 5", "= A + 7 for i in range(6): A = A + 4", "1 A = A + 7 A = A + 4 for i", "2 for i in range(9): A = A + 5 for i in", "6 for i in range(2): A = A + 1 A = A", "A = A + 3 for i in range(2): for i in range(8):", "= A + 9 A = A + 8 for i in range(4):", "A = A + 7 for i in range(3): A = A +", "A + 2 A = A + 3 for i in range(2): for", "i in range(7): A = A + 5 for i in range(2): A", "in range(9): A = A + 8 for i in range(9): A =", "A + 7 A = A + 4 A = A + 2", "7 for i in range(7): for i in range(7): for i in range(5):", "for i in range(1): A = A + 7 A = A +", "in range(8): for i in range(5): A = A + 9 A =", "A + 5 A = A + 6 for i in 
range(1): A", "A + 9 A = A + 6 A = A + 9", "for i in range(7): for i in range(4): A = A + 8", "range(4): A = A + 6 A = A + 4 A =", "A + 5 for i in range(7): A = A + 8 for", "= A + 5 for i in range(2): A = A + 6", "9 for i in range(1): A = A + 8 for i in", "A + 7 for i in range(3): A = A + 9 A", "+ 6 A = A + 4 A = A + 6 for", "i in range(3): A = A + 4 A = A + 4", "range(9): A = A + 3 A = A + 4 A =", "(\\d) -> for i in range($1) A = 0 for i in range(2):", "+ 6 A = A + 6 for i in range(9): A =", "+ 6 A = A + 3 for i in range(7): A =", "1 for i in range(5): A = A + 6 for i in", "in range(8): A = A + 9 A = A + 8 for", "in range(9): A = A + 5 A = A + 4 for", "+ 6 for i in range(9): A = A + 6 for i", "A = A + 4 A = A + 9 A = A", "A = A + 2 for i in range(4): A = A +", "2 A = A + 2 A = A + 8 for i", "8 for i in range(8): for i in range(4): for i in range(8):", "A = A + 3 A = A + 2 A = A", "in range(5): A = A + 5 A = A + 4 A", "A + 5 A = A + 3 for i in range(3): A", "for i in range(9): for i in range(2): A = A + 3", "for i in range(8): A = A + 9 for i in range(5):", "in range(1): A = A + 4 for i in range(9): A =", "2 A = A + 3 for i in range(2): for i in", "in range(3): A = A + 1 for i in range(1): for i", "+ 6 A = A + 8 for i in range(2): A =", "= A + 7 A = A + 1 for i in range(9):", "A = A + 7 A = A + 3 for i in", "A + 5 A = A + 3 A = A + 9", "for i in range(1): for i in range(1): A = A + 4", "+ 5 for i in range(2): for i in range(3): A = A", "A + 9 A = A + 1 for i in range(4): for", "7 for i in range(5): A = A + 6 A = A", "5 A = A + 2 A = A + 6 A =", "9 A = A + 1 for i in range(1): A = A", "A + 5 A = A + 2 A = A + 6", "A = A + 5 A = A + 3 A = A", "A + 5 for i in range(2): A = A + 5 A", "= A + 5 for i in range(7): A = A + 5", "A + 1 A = A + 5 for i in range(2): A", "+ 4 A = A + 4 A = A + 2 A", "A + 5 for i in range(6): A = A + 3 for", "= A + 4 for i in range(1): A = A + 5", "+ 9 for i in range(8): A = A + 9 A =", "+ 8 A = A + 7 A = A + 4 A", "A + 7 A = A + 8 A = A + 4", 
"in range(8): A = A + 2 A = A + 5 for", "+ 7 A = A + 1 A = A + 5 for", "+ 7 for i in range(4): A = A + 2 A =", "i in range(4): A = A + 6 A = A + 4", "in range(1): A = A + 3 A = A + 3 A", "+ 5 for i in range(5): A = A + 5 for i", "range(1): for i in range(5): A = A + 6 A = A", "+ 5 A = A + 4 for i in range(9): for i", "A = A + 3 A = A + 4 A = A", "in range(6): A = A + 9 A = A + 1 for", "for i in range(2): A = A + 9 A = A +", "range(1): A = A + 6 A = A + 6 A =", "A + 2 A = A + 2 A = A + 5", "i in range(3): A = A + 3 A = A + 4", "A = A + 5 for i in range(1): for i in range(8):", "6 A = A + 7 for i in range(4): for i in", "+ 7 A = A + 8 A = A + 3 for", "+ 9 for i in range(9): A = A + 4 A =", "= A + 4 for i in range(6): A = A + 6", "in range(2): A = A + 4 A = A + 1 for", "A + 3 A = A + 9 for i in range(6): A", "i in range(4): A = A + 6 A = A + 6", "range(5): A = A + 7 A = A + 7 A =", "5 A = A + 3 for i in range(3): A = A", "7 A = A + 6 A = A + 5 A =", "4 A = A + 9 A = A + 5 A =", "i in range(3): A = A + 3 A = A + 1", "A = A + 7 A = A + 1 for i in", "= A + 9 A = A + 4 for i in range(9):", "i in range(8): A = A + 9 for i in range(9): for", "A + 4 A = A + 7 for i in range(5): A", "A = A + 1 A = A + 7 for i in", "range(2): A = A + 7 A = A + 1 A =", "5 for i in range(9): A = A + 5 A = A", "5 A = A + 5 A = A + 7 A =", "i in range(6): A = A + 8 A = A + 1", "+ 7 for i in range(8): for i in range(1): A = A", "+ 9 A = A + 5 for i in range(9): for i", "2 for i in range(4): A = A + 6 for i in", "+ 2 A = A + 8 A = A + 4 for", "for i in range(6): A = A + 2 A = A +", "A = A + 3 for i in range(1): for i in range(1):", "A + 6 for i in range(9): A = A + 3 for", "range(8): A = A + 8 A = A + 1 A =", "A + 5 A = A + 3 A = A + 3", "range(6): A = A + 6 A = A + 1 A =", "= A + 9 for i in range(1): A = A + 8", "in range(4): A = A + 7 A = A + 1 for", "+ 5 A = A + 9 for i in range(8): A =", "7 A = A + 1 A = A + 4 for i", "A + 4 for i in range(8): A = A + 5 A", "i in range(1): for i in range(8): A = 
A + 5 for", "1 A = A + 9 for i in range(3): for i in", "= A + 6 for i in range(4): A = A + 9", "+ 3 A = A + 6 for i in range(6): A =", "for i in range(6): A = A + 8 A = A +", "A + 9 A = A + 7 for i in range(5): A", "range(4): A = A + 1 A = A + 6 A =", "= A + 6 A = A + 5 A = A +", "range(2): A = A + 5 A = A + 6 for i", "+ 6 A = A + 5 for i in range(7): A =", "A = A + 4 for i in range(3): A = A +", "= A + 1 A = A + 4 for i in range(1):", "i in range(2): A = A + 6 for i in range(1): for", "= A + 8 for i in range(2): A = A + 9", "for i in range(5): A = A + 7 A = A +", "2 A = A + 7 A = A + 2 for i", "3 A = A + 9 for i in range(6): A = A", "for i in range(9): A = A + 3 for i in range(9):", "A = A + 8 A = A + 4 for i in", "8 for i in range(2): A = A + 6 for i in", "3 A = A + 2 A = A + 1 A =", "9 A = A + 5 for i in range(9): for i in", "8 for i in range(4): for i in range(4): A = A +", "= A + 1 A = A + 7 for i in range(8):", "in range(3): A = A + 3 A = A + 4 A", "A = A + 9 A = A + 5 for i in", "+ 5 A = A + 5 A = A + 1 A", "= A + 5 A = A + 7 for i in range(6):", "A + 2 A = A + 8 A = A + 6", "A + 8 A = A + 1 A = A + 2", "A + 9 for i in range(9): A = A + 4 A", "in range(5): A = A + 7 A = A + 7 A", "8 for i in range(4): A = A + 6 A = A", "+ 7 A = A + 6 A = A + 9 A", "A + 5 for i in range(7): A = A + 3 A", "= A + 1 for i in range(9): A = A + 2", "= A + 1 A = A + 1 for i in range(5):", "A + 6 for i in range(4): for i in range(2): A =", "+ 2 for i in range(8): A = A + 9 for i", "A = A + 1 A = A + 5 for i in", "+ 2 A = A + 9 A = A + 7 for", "1 A = A + 9 for i in range(3): A = A", "i in range(2): A = A + 5 for i in range(3): A", "A = A + 1 A = A + 1 A = A", "= A + 4 for i in range(9): A = A + 3", "in range(1): for i in range(5): A = A + 6 A =", "A = A + 4 A = A + 1 for i in", "+ 4 A = A + 7 for i in range(5): A =", "range(7): A = A + 7 for i in range(7): A = A", "= A + 1 for i in range(9): for i in range(6): A", "= A + 4 for i in range(3): A = A + 3", "= A + 5 for i in range(3): A 
= A + 5", "A = A + 9 for i in range(2): for i in range(6):", "A + 4 A = A + 7 for i in range(3): A", "7 for i in range(9): A = A + 2 A = A", "+ 8 for i in range(7): A = A + 4 for i", "+ 9 A = A + 1 A = A + 9 A", "A + 9 for i in range(4): for i in range(1): A =", "A + 1 for i in range(8): A = A + 8 A", "= A + 1 for i in range(2): A = A + 3", "range(2): for i in range(3): A = A + 1 A = A", "i in range(2): A = A + 1 A = A + 7", "6 A = A + 9 for i in range(1): A = A", "in range(8): A = A + 9 for i in range(9): for i", "A + 7 A = A + 8 A = A + 3", "= A + 9 for i in range(3): A = A + 5", "+ 9 A = A + 5 A = A + 5 A", "range(8): for i in range(6): A = A + 3 for i in", "A = A + 1 A = A + 5 A = A", "+ 9 A = A + 3 for i in range(1): for i", "in range(4): A = A + 6 A = A + 6 for", "+ 1 A = A + 1 for i in range(5): A =", "+ 2 A = A + 4 A = A + 9 for", "A + 5 A = A + 9 for i in range(8): A", "= A + 1 A = A + 2 A = A +", "+ 1 A = A + 8 for i in range(7): A =", "= A + 8 A = A + 4 for i in range(1):", "+ 7 for i in range(3): A = A + 9 A =", "A + 7 A = A + 2 for i in range(2): A", "+ 7 A = A + 7 for i in range(7): A =", "range(4): A = A + 8 for i in range(7): A = A", "A = A + 4 A = A + 9 for i in", "= A + 9 for i in range(5): A = A + 9", "A + 1 A = A + 2 for i in range(6): for", "in range(8): for i in range(2): A = A + 6 A =", "i in range(4): for i in range(8): A = A + 4 for", "+ 8 for i in range(8): for i in range(4): for i in", "A + 6 A = A + 5 A = A + 5", "= A + 2 A = A + 5 for i in range(4):", "for i in range(5): A = A + 5 for i in range(3):", "+ 5 A = A + 7 for i in range(6): A =", "A + 6 A = A + 1 A = A + 3", "range(9): for i in range(3): A = A + 4 for i in", "i in range(6): A = A + 2 A = A + 8", "+ 7 A = A + 8 A = A + 5 for", "i in range(3): for i in range(3): A = A + 5 print(A)", "A + 5 for i in range(2): A = A + 4 A", "= A + 5 for i in range(8): for i in range(6): A", "+ 2 for i in range(4): A = A + 7 A =", "A = A + 4 for i in range(5): A = A +", "5 for i in range(2): A = A 
+ 5 A = A", "+ 1 A = A + 8 for i in range(6): A =", "= A + 9 A = A + 8 for i in range(8):", "= A + 1 for i in range(6): A = A + 4", "= A + 4 for i in range(5): A = A + 8", "3 for i in range(3): A = A + 4 A = A", "A + 8 for i in range(8): A = A + 5 for", "+ 4 A = A + 4 for i in range(6): A =", "for i in range(6): for i in range(9): A = A + 7", "i in range(1): A = A + 5 A = A + 2", "A + 3 A = A + 5 for i in range(7): A", "+ 2 for i in range(9): A = A + 5 for i", "A + 6 A = A + 1 for i in range(7): A", "A + 3 A = A + 2 A = A + 7", "i in range(5): A = A + 1 for i in range(9): for", "range(8): A = A + 2 A = A + 5 for i", "A = A + 7 A = A + 8 A = A", "A + 7 A = A + 5 A = A + 8", "+ 1 A = A + 3 A = A + 8 for", "range(5): A = A + 3 for i in range(3): A = A", "= A + 3 A = A + 5 for i in range(3):", "A = A + 1 for i in range(9): A = A +", "+ 9 A = A + 1 A = A + 9 for", "+ 3 A = A + 6 A = A + 4 A", "A + 2 A = A + 1 for i in range(1): A", "+ 5 A = A + 7 A = A + 8 A", "5 for i in range(7): A = A + 3 A = A", "i in range(4): A = A + 7 A = A + 1", "for i in range(8): for i in range(1): A = A + 7", "for i in range(7): A = A + 3 A = A +", "+ 1 A = A + 7 A = A + 6 A", "i in range(1): A = A + 8 for i in range(8): for", "in range(2): for i in range(3): A = A + 1 A =", "range(1): A = A + 7 for i in range(4): A = A", "range(8): for i in range(1): for i in range(4): A = A +", "7 A = A + 4 for i in range(8): A = A", "i in range(5): for i in range(6): A = A + 5 A", "for i in range(6): for i in range(1): A = A + 3", "range(2): A = A + 2 for i in range(3): A = A", "range(9): A = A + 5 A = A + 5 for i", "= A + 1 for i in range(1): A = A + 1", "+ 9 A = A + 6 for i in range(9): A =", "3 for i in range(2): A = A + 5 for i in", "+ 3 A = A + 6 A = A + 4 for", "+ 2 for i in range(7): for i in range(4): A = A", "= A + 8 for i in range(7): A = A + 7", "A + 9 A = A + 1 for i in range(6): A", "3 A = A + 9 for i in range(7): A = A", "= A + 5 A = A + 2 A = A +", "= A + 1 A = A + 9 for i in range(3):", 
"range(5): A = A + 1 for i in range(9): for i in", "= A + 7 for i in range(4): for i in range(7): A", "range(1): A = A + 4 for i in range(5): A = A", "range(3): A = A + 5 A = A + 3 A =", "= A + 3 for i in range(1): A = A + 2", "in range(3): for i in range(5): A = A + 4 for i", "A + 9 for i in range(6): A = A + 9 A", "= A + 1 A = A + 2 for i in range(6):", "+ 6 for i in range(7): A = A + 9 for i", "4 A = A + 5 for i in range(9): for i in", "in range(4): A = A + 6 A = A + 4 A", "i in range(9): A = A + 4 A = A + 3", "range(9): for i in range(7): A = A + 5 for i in", "for i in range(2): A = A + 6 for i in range(1):", "+ 9 for i in range(2): for i in range(6): A = A", "A + 8 A = A + 7 A = A + 9", "= A + 5 for i in range(3): A = A + 9", "4 A = A + 6 for i in range(5): for i in", "+ 1 A = A + 2 A = A + 8 A", "+ 3 for i in range(8): for i in range(8): A = A", "A = A + 6 A = A + 7 A = A", "= A + 8 A = A + 8 A = A +", "5 A = A + 4 for i in range(4): for i in", "= A + 3 A = A + 6 for i in range(1):", "in range(1): A = A + 7 for i in range(4): A =", "range(3): A = A + 4 A = A + 4 A =", "A + 6 for i in range(9): A = A + 6 for", "in range(8): A = A + 5 for i in range(8): A =", "i in range(2): A = A + 6 for i in range(6): A", "= A + 7 for i in range(8): A = A + 2", "+ 7 for i in range(9): A = A + 6 A =", "= A + 5 A = A + 7 A = A +", "9 A = A + 6 A = A + 2 for i", "8 A = A + 1 A = A + 4 for i", "in range(6): A = A + 5 for i in range(6): A =", "A + 5 for i in range(9): for i in range(7): A =", "1 A = A + 9 A = A + 5 A =", "= A + 6 A = A + 6 for i in range(9):", "= A + 1 for i in range(8): A = A + 8", "A + 2 A = A + 8 for i in range(4): for", "2 for i in range(6): A = A + 9 A = A", "+ 3 for i in range(2): for i in range(8): A = A", "for i in range(7): A = A + 2 for i in range(4):", "A + 1 for i in range(8): A = A + 7 A", "A + 8 A = A + 2 A = A + 3", "9 for i in range(1): A = A + 4 for i in", "range(5): A = A + 6 A = A + 2 for i", "for i in range(8): for i in range(5): A = A + 9", "i 
in range(8): A = A + 7 A = A + 2", "= A + 3 for i in range(2): A = A + 2", "i in range(4): A = A + 5 A = A + 5", "i in range(9): A = A + 2 for i in range(1): A", "+ 2 A = A + 6 A = A + 8 A", "for i in range(2): A = A + 3 for i in range(3):", "+ 8 A = A + 5 A = A + 1 A", "= A + 9 A = A + 7 for i in range(7):", "range(8): A = A + 5 for i in range(8): A = A", "A = A + 3 A = A + 6 for i in", "A = A + 1 A = A + 3 A = A", "= A + 6 for i in range(6): A = A + 9", "A = A + 9 A = A + 1 for i in", "= A + 6 A = A + 4 for i in range(9):", "i in range(2): A = A + 7 A = A + 1", "in range(4): A = A + 8 A = A + 6 A", "range(7): A = A + 3 A = A + 5 for i", "i in range(8): A = A + 2 A = A + 4", "in range(7): A = A + 2 A = A + 2 A", "4 A = A + 4 for i in range(6): A = A", "8 A = A + 3 A = A + 1 A =", "= A + 6 A = A + 8 for i in range(1):", "+ 6 A = A + 8 A = A + 8 A", "= A + 2 A = A + 5 for i in range(1):", "A = A + 5 A = A + 2 A = A", "= A + 2 for i in range(4): A = A + 7", "= A + 2 for i in range(2): A = A + 6", "for i in range(9): A = A + 8 for i in range(9):", "= A + 4 for i in range(8): A = A + 9", "in range(1): A = A + 5 A = A + 2 A", "i in range(8): A = A + 5 A = A + 1", "in range(2): A = A + 7 A = A + 1 A", "A + 4 A = A + 9 A = A + 8", "A + 1 for i in range(2): A = A + 3 for", "7 for i in range(4): for i in range(7): A = A +", "A + 4 for i in range(5): for i in range(3): for i", "range(2): A = A + 3 for i in range(5): A = A", "+ 1 A = A + 9 A = A + 5 A", "in range(3): A = A + 4 A = A + 4 for", "in range(5): A = A + 3 for i in range(6): for i", "6 for i in range(2): A = A + 4 A = A", "A + 3 A = A + 5 A = A + 1", "6 A = A + 1 A = A + 5 A =", "for i in range(5): for i in range(1): A = A + 4", "for i in range(7): A = A + 6 for i in range(5):", "+ 7 for i in range(3): A = A + 6 for i", "7 A = A + 7 A = A + 5 A =", "= A + 9 for i in range(5): A = A + 2", "A + 4 A = A + 4 A = A + 2", "range(9): A = A + 2 A = A + 8 A =", "A = A + 3 A = A + 9 for i in", "1 for i in range(1): for i in 
range(5): A = A +", "8 A = A + 7 A = A + 8 A =", "for i in range(9): A = A + 7 A = A +", "7 for i in range(9): A = A + 8 for i in", "+ 3 for i in range(9): A = A + 1 A =", "A + 6 A = A + 4 A = A + 3", "A + 7 A = A + 9 A = A + 2", "A + 1 for i in range(6): A = A + 4 A", "in range(4): A = A + 6 A = A + 3 for", "+ 5 for i in range(8): for i in range(2): A = A", "+ 9 A = A + 8 for i in range(1): A =", "A + 7 A = A + 7 A = A + 9", "1 for i in range(4): for i in range(1): A = A +", "i in range(6): A = A + 4 A = A + 6", "in range(2): A = A + 9 A = A + 4 A", "i in range(2): A = A + 7 for i in range(9): A", "+ 7 for i in range(2): for i in range(7): A = A", "= A + 1 for i in range(4): for i in range(1): A", "in range(3): A = A + 1 A = A + 1 for", "in range(6): A = A + 8 A = A + 4 for", "A + 9 A = A + 1 A = A + 8", "+ 8 A = A + 1 A = A + 2 for", "2 A = A + 7 A = A + 4 for i", "in range(2): for i in range(6): A = A + 9 A =", "A = A + 9 A = A + 3 for i in", "for i in range(8): A = A + 4 for i in range(3):", "A = A + 2 A = A + 3 A = A", "in range(9): A = A + 1 A = A + 3 A", "in range(8): A = A + 6 A = A + 1 A", "A + 3 A = A + 8 for i in range(7): A", "= A + 3 for i in range(5): A = A + 3", "4 for i in range(5): A = A + 8 A = A", "+ 2 A = A + 8 for i in range(4): for i", "8 for i in range(8): A = A + 5 for i in", "A + 1 for i in range(8): A = A + 5 for", "+ 7 A = A + 2 for i in range(2): A =", "2 A = A + 5 A = A + 3 A =", "i in range(2): A = A + 3 A = A + 9", "i in range(2): A = A + 4 for i in range(8): A", "= A + 5 for i in range(3): for i in range(5): A", "i in range(2): A = A + 7 A = A + 7", "range(7): A = A + 1 for i in range(2): A = A", "= A + 8 A = A + 3 for i in range(8):", "for i in range(2): A = A + 2 for i in range(3):", "for i in range(2): for i in range(8): A = A + 5", "A + 7 A = A + 3 A = A + 5", "= A + 1 for i in range(9): for i in range(7): A", "3 A = A + 4 A = A + 2 A =", "i in range(6): A = A + 4 A = A + 3", "in range(8): A = A + 7 A = A + 2 A", "i in range(2): 
A = A + 4 for i in range(2): A", "A + 4 A = A + 2 for i in range(9): for", "A + 1 A = A + 3 for i in range(5): for", "= A + 4 A = A + 7 for i in range(2):", "range(4): A = A + 6 A = A + 8 for i", "A + 7 for i in range(9): A = A + 8 for", "A = A + 2 A = A + 7 for i in", "= A + 7 for i in range(7): A = A + 6", "9 A = A + 1 A = A + 2 A =", "A = A + 8 for i in range(1): A = A +", "range(2): A = A + 9 for i in range(1): A = A", "A = A + 5 A = A + 7 for i in", "= A + 6 for i in range(4): A = A + 8", "+ 9 A = A + 8 for i in range(8): for i", "+ 5 A = A + 3 A = A + 5 A", "6 for i in range(1): A = A + 7 A = A", "= A + 4 A = A + 2 for i in range(9):", "+ 2 A = A + 5 A = A + 8 A", "2 for i in range(4): A = A + 7 A = A", "A = A + 7 for i in range(7): for i in range(4):", "for i in range(6): A = A + 2 for i in range(5):", "A + 9 for i in range(3): for i in range(3): A =", "8 A = A + 7 A = A + 1 A =", "+ 6 for i in range(9): A = A + 1 A =", "in range(4): for i in range(7): A = A + 1 for i", "3 for i in range(8): A = A + 2 A = A", "A + 1 A = A + 9 for i in range(3): for", "= A + 7 for i in range(9): A = A + 8", "range(3): A = A + 2 A = A + 5 A =", "+ 2 A = A + 1 for i in range(9): A =", "3 for i in range(4): A = A + 4 A = A", "i in range(7): A = A + 3 for i in range(5): A", "= A + 5 for i in range(1): for i in range(8): A", "= A + 3 for i in range(5): A = A + 2", "A + 2 A = A + 5 A = A + 3", "for i in range(8): A = A + 5 A = A +", "A + 1 A = A + 7 for i in range(8): A", "i in range(9): for i in range(7): A = A + 3 for", "in range(1): for i in range(1): A = A + 1 for i", "A + 2 A = A + 7 A = A + 4", "+ 7 A = A + 1 A = A + 4 for", "A = A + 4 A = A + 5 for i in", "in range(3): A = A + 7 for i in range(9): for i", "+ 5 for i in range(7): A = A + 5 for i", "+ 5 A = A + 9 for i in range(5): A =", "= A + 8 for i in range(4): for i in range(4): A", "A = A + 4 for i in range(5): for i in range(3):", "i in range(2): for i in range(3): A = A + 1 A", "A = A + 3 A = A + 3 for i in", "A + 6 
for i in range(4): A = A + 9 A", "= A + 8 for i in range(6): A = A + 6", "in range(7): A = A + 9 for i in range(9): A =", "A = A + 6 for i in range(7): for i in range(6):", "for i in range(2): A = A + 7 A = A +", "range(7): A = A + 8 A = A + 3 for i", "in range(2): A = A + 5 A = A + 7 A", "for i in range(2): A = A + 4 for i in range(2):", "2 for i in range(5): A = A + 7 A = A", "A + 4 A = A + 3 for i in range(4): A", "for i in range(1): for i in range(1): for i in range(1): A", "for i in range(5): A = A + 1 for i in range(9):", "in range(9): for i in range(2): A = A + 3 A =", "A + 8 for i in range(9): A = A + 8 A", "7 A = A + 8 A = A + 3 A =", "+ 1 for i in range(5): A = A + 1 for i", "range(3): A = A + 6 for i in range(5): A = A", "i in range(5): A = A + 9 A = A + 7", "= A + 9 for i in range(3): for i in range(3): A", "for i in range(6): A = A + 4 A = A +", "in range(1): A = A + 2 A = A + 5 for", "= A + 5 for i in range(9): for i in range(3): A", "i in range(7): A = A + 5 A = A + 2", "A + 6 for i in range(9): A = A + 1 A", "range(7): A = A + 6 for i in range(5): A = A", "+ 2 for i in range(8): for i in range(6): A = A", "in range(5): A = A + 6 A = A + 9 A", "A + 3 A = A + 1 A = A + 7", "range(9): for i in range(2): A = A + 3 A = A", "in range(3): A = A + 3 A = A + 1 A", "A + 3 A = A + 1 A = A + 2", "2 for i in range(7): for i in range(4): A = A +", "i in range(8): A = A + 4 A = A + 2", "+ 6 A = A + 2 for i in range(3): A =", "+ 7 for i in range(4): for i in range(7): A = A", "range(1): for i in range(8): A = A + 5 for i in", "+ 1 for i in range(9): for i in range(5): A = A", "+ 1 for i in range(1): for i in range(5): A = A", "range(1): for i in range(1): for i in range(1): A = A +", "8 for i in range(6): A = A + 5 for i in", "= A + 3 for i in range(4): A = A + 1", "+ 1 for i in range(8): A = A + 4 A =", "+ 3 A = A + 1 A = A + 7 A", "= A + 4 for i in range(9): A = A + 4", "A = A + 6 for i in range(2): A = A +", "+ 8 A = A + 5 for i in range(1): A =", "range(5): A = 
A + 8 A = A + 8 for i", "+ 2 A = A + 1 A = A + 5 for", "for i in range(1): A = A + 9 A = A +", "+ 3 A = A + 9 A = A + 6 for", "for i in range(5): A = A + 3 for i in range(3):", "5 A = A + 7 for i in range(6): A = A", "7 for i in range(8): A = A + 6 A = A", "6 for i in range(2): A = A + 2 A = A", "in range(6): A = A + 8 A = A + 5 for", "+ 4 A = A + 4 for i in range(3): A =", "A + 6 for i in range(1): A = A + 3 A", "A + 5 A = A + 5 for i in range(2): for", "+ 6 for i in range(4): A = A + 9 A =", "range(6): A = A + 4 A = A + 9 A =", "2 A = A + 1 for i in range(8): for i in", "A + 2 for i in range(4): A = A + 6 for", "= A + 4 for i in range(2): A = A + 9", "A = A + 9 A = A + 2 for i in", "+ 3 A = A + 9 for i in range(6): A =", "A + 2 A = A + 8 for i in range(2): A", "A + 6 for i in range(6): A = A + 9 A", "= A + 2 for i in range(3): A = A + 1", "+ 7 for i in range(7): A = A + 3 A =", "+ 4 A = A + 9 A = A + 5 A", "i in range(2): A = A + 2 for i in range(3): A", "A + 7 A = A + 3 for i in range(2): A", "+ 2 for i in range(2): A = A + 6 for i", "= A + 6 A = A + 8 A = A +", "+ 8 for i in range(2): A = A + 9 A =", "A = A + 2 for i in range(5): A = A +", "+ 7 A = A + 4 A = A + 2 A", "A + 5 for i in range(1): A = A + 8 A", "range(2): A = A + 4 for i in range(8): A = A", "= A + 3 A = A + 7 for i in range(8):", "A = A + 1 for i in range(7): A = A +", "i in range(1): A = A + 2 A = A + 5", "4 for i in range(8): A = A + 9 A = A", "A + 8 A = A + 4 for i in range(8): for", "in range(5): A = A + 7 A = A + 5 A", "for i in range(5): A = A + 1 for i in range(8):", "+ 8 A = A + 7 A = A + 8 A", "A + 1 for i in range(6): A = A + 8 A", "4 A = A + 2 A = A + 8 A =", "in range(1): A = A + 8 A = A + 7 A", "= A + 1 for i in range(1): A = A + 8", "= A + 3 for i in range(6): for i in range(2): A", "A = A + 1 A = A + 8 A = A", "+ 5 for i in range(1): A = A + 4 for i", "A + 5 for i in range(2): A = A + 1 A", "in range(2): A = A + 7 for i in range(9): A =", "in range(2): for i in range(8): A = A + 5 A 
=", "A + 5 A = A + 5 for i in range(2): A", "+ 1 A = A + 2 for i in range(6): for i", "range(5): for i in range(6): A = A + 8 for i in", "A + 3 A = A + 4 A = A + 5", "A + 1 A = A + 1 for i in range(5): A", "8 A = A + 2 A = A + 1 A =", "range(6): A = A + 4 A = A + 6 for i", "A + 1 A = A + 7 A = A + 3", "= A + 4 for i in range(2): A = A + 3", "= A + 4 A = A + 7 for i in range(3):", "+ 4 for i in range(3): for i in range(2): A = A", "range(8): A = A + 7 for i in range(6): A = A", "i in range(4): A = A + 6 for i in range(3): A", "1 for i in range(8): A = A + 7 A = A", "range(2): A = A + 3 A = A + 2 A =", "A = A + 1 for i in range(6): A = A +", "A + 1 A = A + 3 A = A + 2", "= A + 4 for i in range(4): A = A + 6", "range(6): A = A + 1 for i in range(9): for i in", "in range(5): A = A + 1 A = A + 1 for", "i in range(6): A = A + 7 for i in range(4): A", "= A + 2 for i in range(8): for i in range(6): A", "= A + 8 for i in range(4): A = A + 2", "5 A = A + 1 A = A + 2 A =", "in range(6): A = A + 4 A = A + 6 for", "A + 1 A = A + 4 A = A + 1", "1 A = A + 4 A = A + 1 A =", "= A + 2 A = A + 3 A = A +", "= A + 1 for i in range(5): A = A + 6", "for i in range(4): for i in range(8): A = A + 4", "for i in range(3): A = A + 6 for i in range(9):", "A + 3 A = A + 9 A = A + 1", "range(1): A = A + 2 for i in range(5): A = A", "A + 4 A = A + 4 A = A + 8", "for i in range(5): for i in range(3): for i in range(6): A", "A + 9 A = A + 7 for i in range(7): for", "+ 7 A = A + 8 A = A + 4 A", "+ 3 for i in range(9): A = A + 9 A =", "+ 6 for i in range(1): A = A + 4 for i", "i in range(3): A = A + 7 for i in range(3): A", "+ 7 A = A + 7 A = A + 9 A", "+ 6 for i in range(1): A = A + 9 A =", "9 A = A + 7 for i in range(3): A = A", "8 for i in range(1): A = A + 2 for i in", "= A + 8 for i in range(7): A = A + 8", "range(2): A = A + 7 A = A + 7 for i", "+ 5 A = A + 3 A = A + 3 A", "A + 6 A = A + 4 A = A + 9", "A + 9 A = A + 2 for i in range(2): A", "= A + 8 for i in range(5): A = A + 6", "A = A + 4 A = 
A + 3 A = A", "i in range(5): A = A + 3 for i in range(3): A", "+ 3 for i in range(2): A = A + 5 A =", "A + 7 A = A + 9 A = A + 7", "for i in range(1): for i in range(5): A = A + 6", "range(9): A = A + 8 for i in range(9): A = A", "+ 1 A = A + 2 for i in range(6): A =", "A + 4 for i in range(4): for i in range(9): A =", "for i in range(8): for i in range(1): for i in range(4): A", "6 for i in range(9): A = A + 3 for i in", "A + 5 A = A + 7 for i in range(5): A", "range(6): for i in range(9): A = A + 7 A = A", "= A + 7 A = A + 7 for i in range(7):", "2 for i in range(2): A = A + 7 A = A", "+ 3 A = A + 9 A = A + 4 A", "+ 4 A = A + 7 A = A + 6 A", "i in range(6): A = A + 6 for i in range(2): A", "for i in range(5): A = A + 4 A = A +", "8 for i in range(7): A = A + 8 for i in", "in range(8): for i in range(7): A = A + 2 for i", "A + 5 A = A + 6 for i in range(2): A", "i in range(2): A = A + 2 A = A + 9", "= A + 6 for i in range(5): A = A + 8", "A + 7 A = A + 6 A = A + 9", "+ 6 for i in range(5): A = A + 8 A =", "+ 7 for i in range(7): for i in range(4): A = A", "A + 9 for i in range(8): A = A + 9 A", "8 A = A + 7 A = A + 9 A =", "A = A + 6 A = A + 9 for i in", "A + 2 A = A + 1 A = A + 5", "for i in range(6): A = A + 5 for i in range(6):", "A + 6 for i in range(2): A = A + 2 A", "9 A = A + 1 for i in range(4): for i in", "5 for i in range(2): A = A + 1 A = A", "= A + 4 A = A + 9 for i in range(1):", "4 A = A + 2 A = A + 8 for i", "+ 3 A = A + 7 for i in range(5): for i", "= A + 4 A = A + 3 for i in range(6):", "2 A = A + 8 A = A + 4 for i", "4 A = A + 3 for i in range(4): A = A", "4 A = A + 8 for i in range(4): A = A", "9 A = A + 1 A = A + 8 A =", "in range(5): A = A + 8 A = A + 8 for", "+ 5 for i in range(1): A = A + 2 for i", "9 A = A + 2 for i in range(1): A = A", "range(9): for i in range(3): A = A + 4 A = A", "A = A + 9 A = A + 8 A = A", "+ 5 for i in range(6): A = A + 3 for i", "A = A + 8 A = A + 5 A = A", "for i in range(4): A = A + 6 A = A +", "in range(7): A = 
A + 3 A = A + 6 A", "+ 4 for i in range(8): A = A + 2 A =", "5 A = A + 7 A = A + 8 A =", "A = A + 9 A = A + 4 A = A", "A + 8 for i in range(7): A = A + 8 for", "+ 8 A = A + 4 for i in range(3): for i", "A = A + 2 for i in range(6): for i in range(1):", "for i in range(8): A = A + 9 for i in range(9):", "= A + 2 A = A + 1 A = A +", "range(4): for i in range(8): A = A + 4 for i in", "+ 5 for i in range(1): A = A + 8 A =", "+ 1 for i in range(4): for i in range(1): A = A", "A + 3 for i in range(9): A = A + 1 A", "3 A = A + 5 for i in range(6): A = A", "8 for i in range(8): for i in range(5): A = A +", "range(5): A = A + 9 A = A + 4 for i", "= A + 1 A = A + 3 for i in range(5):", "A + 4 for i in range(9): A = A + 4 A", "+ 5 A = A + 8 for i in range(8): A =", "A + 2 A = A + 7 for i in range(4): A", "3 for i in range(1): A = A + 2 for i in", "A = A + 2 A = A + 3 for i in", "5 A = A + 2 A = A + 2 A =", "= A + 2 A = A + 6 A = A +", "A = A + 3 A = A + 7 for i in", "for i in range(2): for i in range(6): A = A + 9", "in range(4): for i in range(1): A = A + 6 A =", "= A + 7 A = A + 2 A = A +", "A = A + 5 for i in range(3): A = A +", "= A + 9 for i in range(8): A = A + 7", "A + 4 for i in range(6): A = A + 6 for", "range(8): A = A + 4 A = A + 7 for i", "= A + 4 for i in range(5): for i in range(3): for", "+ 1 A = A + 6 A = A + 1 for", "8 A = A + 3 A = A + 5 A =", "7 A = A + 8 A = A + 5 for i", "6 A = A + 1 A = A + 2 for i", "8 A = A + 6 A = A + 1 A =", "i in range(1): A = A + 2 for i in range(5): A", "range(8): for i in range(6): A = A + 1 A = A", "in range(7): A = A + 5 for i in range(2): A =", "+ 9 A = A + 2 for i in range(1): A =", "+ 6 for i in range(1): for i in range(2): A = A", "A + 3 A = A + 4 A = A + 2", "i in range(8): A = A + 6 A = A + 1", "+ 7 for i in range(5): A = A + 9 A =", "for i in range(5): A = A + 9 for i in range(8):", "= A + 2 A = A + 1 for i in range(8):", "A + 5 A = A + 5 for i in range(4): A", "+ 2 A = A + 2 A = A + 5 for", "range(7): A = A + 3 A = A + 
6 A =", "+ 5 for i in range(8): for i in range(6): A = A", "= A + 5 A = A + 1 A = A +", "A = A + 5 for i in range(8): for i in range(6):", "+ 8 A = A + 3 A = A + 3 A", "A = A + 6 A = A + 7 for i in", "i in range(6): A = A + 8 A = A + 4", "6 for i in range(7): A = A + 9 for i in", "+ 6 for i in range(7): for i in range(6): for i in", "in range(4): A = A + 7 A = A + 7 A", "2 for i in range(5): A = A + 9 A = A", "for i in range(6): A = A + 3 for i in range(9):", "A + 2 A = A + 1 for i in range(9): A", "in range(8): for i in range(6): A = A + 4 A =", "= A + 2 A = A + 3 for i in range(2):", "i in range(5): A = A + 1 A = A + 1", "5 A = A + 4 for i in range(9): for i in", "A = A + 5 A = A + 7 A = A", "range(2): A = A + 5 A = A + 1 A =", "A + 2 A = A + 1 for i in range(8): for", "+ 1 A = A + 4 for i in range(8): A =", "i in range(5): A = A + 9 A = A + 1", "+ 6 A = A + 4 A = A + 3 A", "+ 8 for i in range(5): A = A + 6 A =", "in range(4): for i in range(1): A = A + 7 for i", "A = A + 4 A = A + 4 for i in", "4 for i in range(1): for i in range(7): for i in range(2):", "+ 6 for i in range(6): A = A + 7 A =", "= A + 5 A = A + 5 for i in range(2):", "range(8): A = A + 5 for i in range(1): A = A", "in range(3): A = A + 1 A = A + 8 for", "for i in range(7): A = A + 3 for i in range(5):", "= A + 4 A = A + 3 A = A +", "for i in range(2): for i in range(3): A = A + 1", "+ 2 for i in range(7): A = A + 5 A =", "in range(4): A = A + 1 A = A + 6 A", "5 A = A + 2 A = A + 1 for i", "A = A + 4 for i in range(4): A = A +", "1 A = A + 1 for i in range(5): A = A", "1 A = A + 2 for i in range(4): A = A", "+ 4 for i in range(3): A = A + 3 A =", "range(5): A = A + 6 for i in range(1): A = A", "+ 5 for i in range(3): A = A + 5 A =", "A + 2 A = A + 5 A = A + 2", "i in range(1): A = A + 9 A = A + 3", "3 A = A + 6 for i in range(9): A = A", "in range(6): A = A + 9 for i in range(5): A =", "A + 7 A = A + 9 A = A + 6", "in range(9): for i in range(6): A = A + 5 A =", "= A + 3 for i in range(4): A = A + 
4", "+ 8 A = A + 3 for i in range(2): A =", "A + 7 A = A + 7 for i in range(7): A", "A + 1 A = A + 1 A = A + 5", "+ 1 A = A + 5 for i in range(3): A =", "range(1): A = A + 7 for i in range(9): A = A", "= A + 8 A = A + 3 for i in range(5):", "range(5): A = A + 5 A = A + 8 A =", "= A + 7 for i in range(5): A = A + 7", "= A + 8 A = A + 9 A = A +", "in range(4): A = A + 7 A = A + 9 A", "= A + 5 for i in range(6): A = A + 7", "i in range(1): A = A + 6 for i in range(2): A", "i in range(6): A = A + 8 A = A + 5", "in range(4): A = A + 4 A = A + 3 A", "+ 4 for i in range(9): A = A + 4 A =", "+ 5 for i in range(9): for i in range(7): A = A", "+ 7 A = A + 4 A = A + 5 A", "A + 3 A = A + 3 A = A + 5", "8 for i in range(7): A = A + 4 for i in", "+ 5 A = A + 7 A = A + 2 for", "A + 3 A = A + 3 for i in range(5): A", "+ 8 A = A + 3 A = A + 6 A", "+ 6 A = A + 4 for i in range(9): A =", "1 for i in range(9): A = A + 2 A = A", "= A + 3 A = A + 6 A = A +", "+ 7 for i in range(5): A = A + 6 A =", "i in range(7): A = A + 3 A = A + 9", "= A + 5 A = A + 8 A = A +", "6 for i in range(6): A = A + 7 A = A", "range(2): A = A + 2 A = A + 5 A =", "i in range(9): A = A + 9 A = A + 5", "1 A = A + 8 A = A + 2 for i", "+ 8 for i in range(2): A = A + 7 for i", "A + 2 for i in range(5): A = A + 9 A", "7 for i in range(5): for i in range(4): A = A +", "A = A + 6 for i in range(1): A = A +", "= A + 7 A = A + 1 for i in range(2):", "= A + 9 A = A + 1 A = A +", "7 for i in range(6): A = A + 4 A = A", "6 A = A + 6 for i in range(9): A = A", "+ 4 A = A + 2 A = A + 2 A", "1 A = A + 2 for i in range(6): for i in", "for i in range(5): A = A + 4 for i in range(4):", "9 A = A + 1 for i in range(6): A = A", "+ 9 A = A + 1 A = A + 3 for", "A + 9 A = A + 8 for i in range(8): for", "A = A + 2 for i in range(9): for i in range(5):", "i in range(8): for i in range(7): A = A + 2 for", "range(1): A = A + 1 for i in range(6): A = A", "+ 5 for i in range(3): A = A + 5 for i", "5 for i in range(8): A = A + 6 for i in", "+ 
6 A = A + 8 A = A + 9 A", "+ 7 for i in range(7): A = A + 2 A =", "6 A = A + 6 for i in range(1): A = A", "A + 2 A = A + 6 A = A + 6", "8 A = A + 7 A = A + 5 A =", "2 A = A + 1 for i in range(9): A = A", "A = A + 8 A = A + 1 A = A", "for i in range(9): A = A + 2 for i in range(1):", "A + 1 A = A + 8 A = A + 7", "= A + 7 for i in range(5): for i in range(4): A", "5 A = A + 5 A = A + 6 for i", "in range(8): A = A + 4 A = A + 2 A", "= A + 6 A = A + 9 for i in range(1):", "3 for i in range(9): A = A + 1 A = A", "for i in range(6): A = A + 6 for i in range(2):", "= A + 5 A = A + 5 for i in range(4):", "in range(5): A = A + 3 for i in range(3): A =", "+ 6 for i in range(5): A = A + 9 A =", "A = A + 9 A = A + 2 A = A", "= A + 8 for i in range(6): A = A + 4", "2 A = A + 8 A = A + 6 for i", "range(3): A = A + 9 for i in range(1): A = A", "in range(8): for i in range(6): A = A + 3 for i", "in range(8): A = A + 9 for i in range(5): A =", "+ 4 A = A + 1 for i in range(6): A =", "2 A = A + 3 A = A + 7 for i", "for i in range(9): A = A + 2 A = A +", "A = A + 6 A = A + 2 A = A", "A + 4 A = A + 7 for i in range(1): for", "A + 5 for i in range(3): A = A + 9 for", "for i in range(5): for i in range(6): A = A + 8", "range(1): A = A + 4 for i in range(9): A = A", "A + 3 A = A + 5 A = A + 5", "A + 4 A = A + 9 A = A + 1", "A + 5 for i in range(3): A = A + 3 A", "i in range(2): A = A + 7 for i in range(8): A", "in range(8): A = A + 4 A = A + 7 for", "for i in range(5): A = A + 6 A = A +", "for i in range(6): A = A + 8 for i in range(9):", "2 A = A + 8 A = A + 7 A =", "i in range(7): A = A + 8 for i in range(6): A", "A = A + 9 for i in range(8): A = A +", "A + 3 for i in range(5): A = A + 3 for", "A + 5 A = A + 2 for i in range(2): A", "A + 3 for i in range(8): A = A + 7 A", "in range(8): A = A + 4 for i in range(3): A =", "+ 8 for i in range(7): A = A + 7 A =", "9 A = A + 3 for i in range(1): for i in", "= A + 1 A = A + 4 A = A +", "7 for i in range(7): A = A + 2 A = A", "A = A + 1 A = A 
+ 7 A = A", "for i in range(5): A = A + 7 for i in range(5):", "A + 5 A = A + 1 for i in range(8): A", "3 for i in range(5): for i in range(1): A = A +", "in range(7): A = A + 5 A = A + 5 for", "range(8): for i in range(6): A = A + 4 A = A", "+ 9 for i in range(8): A = A + 7 for i", "= A + 5 for i in range(5): A = A + 5", "i in range(7): for i in range(2): A = A + 3 A", "i in range(4): A = A + 7 A = A + 7", "A + 6 A = A + 6 A = A + 2", "A = A + 6 A = A + 5 for i in", "+ 8 A = A + 2 for i in range(9): A =", "+ 5 A = A + 2 A = A + 2 A", "+ 5 A = A + 5 for i in range(2): for i", "range(3): A = A + 7 for i in range(9): for i in", "= A + 2 for i in range(3): A = A + 4", "A + 5 for i in range(3): A = A + 6 for", "for i in range(2): A = A + 9 for i in range(1):", "range(1): A = A + 2 A = A + 8 for i", "= A + 1 for i in range(9): for i in range(5): A", "4 A = A + 3 A = A + 7 for i", "range(1): A = A + 5 for i in range(7): A = A", "5 for i in range(7): A = A + 8 for i in", "A = A + 1 for i in range(9): for i in range(5):", "in range(1): for i in range(4): A = A + 6 A =", "1 for i in range(8): A = A + 5 for i in", "+ 3 A = A + 2 A = A + 6 A", "3 for i in range(8): for i in range(8): A = A +", "1 for i in range(1): A = A + 8 A = A", "5 A = A + 4 A = A + 2 A =", "A = A + 2 A = A + 1 A = A", "for i in range(2): A = A + 5 for i in range(3):", "A + 4 A = A + 5 for i in range(9): for", "1 A = A + 8 A = A + 3 A =", "+ 3 A = A + 8 for i in range(7): A =", "+ 2 A = A + 6 A = A + 6 for", "+ 7 A = A + 5 A = A + 5 A", "A + 4 A = A + 2 A = A + 3", "A + 2 for i in range(7): for i in range(4): A =", "i in range(1): A = A + 4 for i in range(2): A", "i in range(8): for i in range(5): A = A + 9 A", "range(5): A = A + 9 A = A + 8 A =", "+ 2 for i in range(4): A = A + 6 A =", "A = A + 4 A = A + 7 for i in", "i in range(1): for i in range(1): for i in range(1): A =", "for i in range(8): A = A + 6 A = A +", "A + 4 for i in range(7): A = A + 8 A", "i in range(8): A = A + 5 for i in range(3): A", 
"A + 1 A = A + 6 for i in range(5): A", "A + 2 A = A + 9 A = A + 7", "5 A = A + 8 A = A + 7 A =", "in range(8): A = A + 2 for i in range(8): for i", "A + 8 A = A + 7 A = A + 1", "A + 4 for i in range(8): A = A + 2 A", "+ 7 for i in range(9): A = A + 2 A =", "6 for i in range(1): A = A + 9 A = A", "A + 1 for i in range(9): for i in range(7): A =", "A = A + 6 for i in range(3): for i in range(4):", "8 A = A + 8 A = A + 7 A =", "A + 9 A = A + 1 for i in range(1): A", "6 A = A + 1 for i in range(7): A = A", "A + 6 for i in range(1): A = A + 4 for", "= A + 7 A = A + 7 A = A +", "6 for i in range(1): A = A + 4 for i in", "range(5): A = A + 8 A = A + 7 A =", "6 for i in range(3): for i in range(4): A = A +", "+ 3 A = A + 2 A = A + 7 A", "5 A = A + 7 A = A + 7 A =", "range(8): A = A + 9 A = A + 1 for i", "in range(4): for i in range(8): A = A + 4 for i", "A + 7 for i in range(1): A = A + 8 A", "A = A + 5 A = A + 4 A = A", "+ 1 for i in range(7): A = A + 7 for i", "+ 3 A = A + 3 A = A + 5 for", "+ 4 A = A + 6 for i in range(5): for i", "2 for i in range(2): A = A + 6 for i in", "= A + 8 A = A + 4 for i in range(3):", "i in range(3): A = A + 5 A = A + 5", "A + 2 A = A + 5 A = A + 8", "= A + 5 for i in range(3): A = A + 3", "A = A + 8 A = A + 2 for i in", "i in range(1): A = A + 1 for i in range(8): A", "i in range(4): A = A + 4 A = A + 3", "9 A = A + 8 for i in range(5): A = A", "range(2): A = A + 1 A = A + 7 A =", "+ 6 A = A + 6 A = A + 4 for", "A = A + 5 A = A + 6 for i in", "5 for i in range(3): A = A + 7 for i in", "7 A = A + 1 A = A + 8 for i", "= A + 5 for i in range(9): A = A + 1", "3 for i in range(4): A = A + 1 A = A", "i in range(5): A = A + 9 A = A + 8", "in range(5): for i in range(4): A = A + 5 A =", "+ 9 A = A + 5 A = A + 3 for", "+ 2 A = A + 3 A = A + 7 for", "for i in range(1): A = A + 2 for i in range(4):", "in range(5): A = A + 9 A = A + 1 A", "range(5): A = A + 2 A = A + 1 for i", "= A + 9 A = A + 1 for i in range(1):", "+ 2 A = A + 5 for i in range(9): A 
=", "6 A = A + 8 for i in range(1): A = A", "A + 8 for i in range(3): A = A + 2 A", "+ 6 for i in range(5): A = A + 3 A =", "A = A + 9 A = A + 7 for i in", "for i in range(6): A = A + 9 for i in range(5):", "+ 8 for i in range(9): A = A + 8 A =", "= A + 4 A = A + 3 for i in range(4):", "6 A = A + 2 A = A + 2 A =", "1 A = A + 7 for i in range(1): A = A", "A + 4 A = A + 3 for i in range(6): for", "8 for i in range(6): A = A + 4 A = A", "in range(7): A = A + 7 for i in range(7): A =", "i in range(9): for i in range(5): A = A + 9 A", "8 A = A + 3 A = A + 9 A =", "A = A + 4 for i in range(1): for i in range(7):", "= A + 8 A = A + 6 for i in range(3):", "+ 5 for i in range(2): A = A + 6 for i", "i in range(9): for i in range(6): A = A + 5 A", "for i in range(4): A = A + 7 A = A +", "4 for i in range(2): A = A + 9 for i in", "= A + 5 for i in range(8): for i in range(2): A", "A + 9 A = A + 6 for i in range(4): for", "in range(9): A = A + 5 for i in range(1): A =", "range(6): A = A + 9 A = A + 1 A =", "A + 5 for i in range(3): A = A + 1 for", "i in range(1): A = A + 3 for i in range(1): A", "range(8): for i in range(4): for i in range(8): A = A +", "i in range(1): A = A + 3 A = A + 9", "8 for i in range(2): A = A + 4 A = A", "A + 1 for i in range(1): for i in range(5): A =", "A = A + 6 A = A + 9 A = A", "+ 5 A = A + 3 A = A + 6 A", "A = A + 4 A = A + 3 for i in", "range(1): A = A + 6 for i in range(2): A = A", "i in range(7): A = A + 2 for i in range(5): A", "= A + 2 A = A + 2 A = A +", "A + 2 A = A + 8 A = A + 7", "= A + 9 A = A + 8 A = A +", "+ 2 for i in range(6): A = A + 9 A =", "A = A + 9 for i in range(3): for i in range(3):", "in range(4): A = A + 7 A = A + 1 A", "for i in range(1): A = A + 7 for i in range(9):", "A + 8 A = A + 3 A = A + 5", "for i in range(6): for i in range(1): A = A + 5", "range(6): A = A + 9 A = A + 2 A =", "= A + 3 A = A + 5 for i in range(6):", "A + 9 A = A + 8 A = A + 3", "A + 3 A = A + 6 A = A + 8", "i in range(3): A = A + 5 for i in 
range(9): A", "for i in range(7): A = A + 1 for i in range(2):", "A + 1 A = A + 2 A = A + 4", "A + 8 A = A + 7 A = A + 5", "A + 3 for i in range(2): A = A + 5 for", "A = A + 6 for i in range(9): A = A +", "= A + 4 A = A + 7 A = A +", "8 for i in range(4): A = A + 7 A = A", "9 A = A + 4 A = A + 6 for i", "7 A = A + 7 A = A + 9 A =", "i in range(3): A = A + 5 A = A + 2", "i in range(9): A = A + 4 A = A + 9", "3 A = A + 4 A = A + 5 for i", "A = A + 7 for i in range(1): A = A +", "= A + 6 for i in range(6): A = A + 7", "= A + 8 for i in range(7): A = A + 4", "+ 4 A = A + 3 A = A + 7 for", "+ 3 A = A + 5 A = A + 2 A", "range(8): A = A + 2 A = A + 4 for i", "= A + 8 A = A + 3 A = A +", "A + 2 for i in range(3): A = A + 4 A", "+ 5 for i in range(9): A = A + 1 A =", "= A + 5 A = A + 1 for i in range(6):", "A = A + 5 for i in range(3): for i in range(5):", "3 for i in range(5): A = A + 2 A = A", "A + 2 A = A + 9 A = A + 1", "7 A = A + 5 A = A + 2 for i", "range(5): A = A + 1 for i in range(8): A = A", "in range(3): A = A + 2 A = A + 5 A", "i in range(3): A = A + 4 for i in range(7): A", "A + 6 A = A + 7 for i in range(4): for", "6 for i in range(1): for i in range(2): A = A +", "i in range(3): A = A + 7 A = A + 9", "A + 6 A = A + 2 A = A + 2", "= A + 7 A = A + 2 for i in range(5):", "i in range(2): A = A + 4 A = A + 9", "A + 3 for i in range(7): A = A + 3 A", "+ 8 for i in range(2): A = A + 6 for i", "i in range(1): A = A + 2 for i in range(7): A", "= A + 3 for i in range(7): A = A + 3", "range(1): A = A + 8 A = A + 7 A =", "= A + 9 for i in range(9): for i in range(2): A", "A + 3 A = A + 3 A = A + 1", "= A + 7 for i in range(2): for i in range(7): A", "2 for i in range(8): A = A + 9 for i in", "+ 4 for i in range(3): A = A + 5 A =", "A + 3 for i in range(4): A = A + 1 A", "A + 1 A = A + 8 for i in range(7): A", "i in range(6): for i in range(1): A = A + 5 A", "in range(7): A = A + 7 A = A + 7 for", "A + 8 A = A + 3 A = A + 9", "4 A = A + 7 for i in range(3): A = A", "in 
range(3): A = A + 5 A = A + 2 A", "range(2): A = A + 2 A = A + 9 A =", "A + 5 A = A + 5 A = A + 7", "A = A + 4 for i in range(4): for i in range(9):", "+ 9 A = A + 1 A = A + 2 A", "+ 4 A = A + 6 for i in range(3): A =", "+ 4 for i in range(4): for i in range(4): A = A", "1 A = A + 1 A = A + 3 A =", "A + 8 A = A + 2 A = A + 1", "A = A + 3 for i in range(6): for i in range(2):", "= A + 5 for i in range(4): A = A + 7", "in range(9): A = A + 3 for i in range(9): A =", "i in range(7): A = A + 6 for i in range(5): A", "= A + 6 for i in range(1): for i in range(2): A", "i in range(6): A = A + 8 for i in range(9): A", "A + 7 for i in range(3): A = A + 4 A", "range(4): A = A + 9 A = A + 4 for i", "= A + 9 A = A + 6 A = A +", "in range(3): A = A + 7 A = A + 1 for", "in range(8): A = A + 5 A = A + 1 A", "A = A + 4 A = A + 2 for i in", "+ 1 for i in range(9): for i in range(6): A = A", "+ 6 A = A + 1 A = A + 2 A", "i in range(9): A = A + 2 A = A + 8", "range(4): for i in range(1): A = A + 7 for i in", "= A + 9 A = A + 2 for i in range(2):", "= A + 2 A = A + 4 for i in range(5):", "range(9): A = A + 3 for i in range(9): A = A", "+ 6 A = A + 6 for i in range(1): A =", "range(5): A = A + 2 A = A + 5 A =", "1 A = A + 6 A = A + 1 for i", "A + 6 for i in range(6): A = A + 7 A", "+ 6 A = A + 6 A = A + 2 for", "in range(3): A = A + 5 for i in range(9): A =", "in range(6): A = A + 6 for i in range(2): A =", "+ 5 A = A + 3 A = A + 9 for", "A + 8 A = A + 5 A = A + 1", "A = A + 8 for i in range(2): A = A +", "+ 3 A = A + 6 for i in range(1): A =", "= A + 4 for i in range(3): A = A + 5", "= A + 8 for i in range(3): A = A + 7", "= A + 5 for i in range(1): A = A + 2", "= A + 7 A = A + 5 A = A +", "+ 5 A = A + 8 for i in range(2): A =", "in range(7): A = A + 4 for i in range(6): A =", "3 A = A + 3 A = A + 5 for i", "for i in range(1): A = A + 2 for i in range(7):", "range(1): A = A + 2 for i in range(4): A = A", "A + 7 for i in range(6): A = A + 4 A", "= A + 9 for i in range(1): A = A + 4", 
"A = A + 3 for i in range(2): A = A +", "for i in range(6): A = A + 1 A = A +", "3 A = A + 6 for i in range(1): A = A", "i in range(2): for i in range(8): A = A + 5 A", "A = A + 3 for i in range(6): for i in range(1):", "in range(4): A = A + 7 A = A + 2 for", "A = A + 8 for i in range(4): A = A +", "A + 8 for i in range(6): A = A + 4 A", "3 for i in range(5): for i in range(6): A = A +", "A = A + 4 for i in range(8): for i in range(7):", "range(5): A = A + 9 A = A + 7 for i", "2 A = A + 9 A = A + 7 for i", "3 for i in range(7): A = A + 3 A = A", "range(6): A = A + 6 for i in range(2): A = A", "6 A = A + 3 for i in range(5): for i in", "+ 3 A = A + 3 for i in range(5): A =", "9 A = A + 5 A = A + 9 for i", "A = A + 1 A = A + 6 for i in", "8 A = A + 3 for i in range(5): A = A", "for i in range(6): A = A + 9 A = A +", "+ 8 for i in range(9): A = A + 5 A =", "A + 5 A = A + 7 A = A + 2", "+ 3 for i in range(8): A = A + 2 A =", "= A + 8 A = A + 3 for i in range(2):", "A + 6 A = A + 1 A = A + 2", "6 A = A + 9 A = A + 1 for i", "A + 6 A = A + 8 for i in range(1): A", "1 A = A + 2 A = A + 6 A =", "range(3): A = A + 5 A = A + 2 A =", "i in range(8): A = A + 5 for i in range(8): A", "= A + 6 A = A + 1 for i in range(7):", "A = A + 5 for i in range(6): A = A +", "4 for i in range(2): A = A + 3 for i in", "3 A = A + 3 A = A + 9 for i", "range(1): A = A + 7 A = A + 1 for i", "4 for i in range(3): for i in range(2): A = A +", "+ 1 A = A + 2 A = A + 6 A", "A + 1 A = A + 9 for i in range(3): A", "A = A + 5 for i in range(1): A = A +", "range(4): A = A + 7 A = A + 1 A =", "A + 2 for i in range(1): A = A + 4 for", "in range(9): A = A + 1 A = A + 6 for", "8 for i in range(6): A = A + 6 for i in", "i in range(6): for i in range(1): A = A + 3 for", "in range(9): A = A + 1 A = A + 7 for", "A = A + 9 for i in range(9): A = A +", "1 A = A + 3 A = A + 9 for i", "range(9): A = A + 8 A = A + 9 A =", "= A + 1 for i in range(7): A = A + 7", "A + 3 A = A + 3 A = A + 9", "i in range(5): A = A + 5 
for i in range(3): for", "in range(5): A = A + 3 A = A + 9 A", "A + 6 for i in range(7): A = A + 9 for", "range(2): A = A + 5 A = A + 7 A =", "in range(9): A = A + 2 A = A + 9 A", "= A + 3 for i in range(2): A = A + 4", "A + 4 A = A + 6 for i in range(3): A", "= A + 6 A = A + 5 for i in range(1):", "range(6): A = A + 8 for i in range(9): A = A", "7 A = A + 9 A = A + 7 for i", "range(6): A = A + 8 A = A + 1 A =", "A = A + 6 for i in range(7): A = A +", "+ 7 A = A + 3 A = A + 5 for", "= A + 4 A = A + 6 for i in range(1):", "for i in range(2): A = A + 1 A = A +", "A + 8 A = A + 3 A = A + 6", "A + 4 for i in range(9): A = A + 3 A", "+ 7 for i in range(5): for i in range(4): A = A", "A + 9 for i in range(5): A = A + 5 A", "for i in range(7): A = A + 9 for i in range(9):", "1 A = A + 6 A = A + 1 A =", "+ 6 for i in range(3): A = A + 7 for i", "= A + 5 A = A + 8 for i in range(8):", "4 A = A + 6 for i in range(3): A = A", "in range(2): A = A + 3 A = A + 7 for", "1 for i in range(6): A = A + 8 A = A", "range(9): for i in range(5): A = A + 9 A = A", "A + 7 A = A + 6 A = A + 5", "range(5): A = A + 7 A = A + 2 A =", "i in range(1): for i in range(1): A = A + 1 for", "A + 1 for i in range(8): for i in range(1): for i", "+ 1 A = A + 5 for i in range(2): A =", "A + 1 for i in range(2): A = A + 2 A", "= A + 4 for i in range(9): A = A + 2", "+ 2 for i in range(2): A = A + 4 for i", "+ 7 A = A + 7 A = A + 6 A", "A + 2 A = A + 4 A = A + 9", "A = A + 5 A = A + 8 for i in", "i in range(7): A = A + 5 for i in range(3): A", "6 A = A + 1 A = A + 3 A =", "A + 4 A = A + 6 for i in range(5): for", "A + 6 for i in range(1): A = A + 9 A", "range(7): A = A + 3 A = A + 3 A =", "+ 1 A = A + 2 for i in range(4): A =", "A + 9 for i in range(7): A = A + 2 for", "= A + 2 A = A + 8 A = A +", "A + 5 for i in range(1): A = A + 6 for", "+ 9 for i in range(5): A = A + 5 A =", "1 for i in range(7): A = A + 7 for i in", "A + 9 A = A + 1 for i in range(8): A", "A + 9 for i in range(9): for i in range(2): A 
=", "+ 8 for i in range(6): A = A + 6 for i", "i in range(7): for i in range(6): for i in range(9): A =", "= A + 7 for i in range(1): A = A + 7", "5 A = A + 2 A = A + 8 A =", "3 A = A + 7 for i in range(8): for i in", "= A + 1 for i in range(8): A = A + 7", "+ 3 A = A + 2 for i in range(6): A =", "in range(2): A = A + 3 for i in range(5): A =", "+ 9 A = A + 7 for i in range(7): for i", "A = A + 8 A = A + 6 for i in", "range(3): A = A + 5 for i in range(9): A = A", "+ 7 for i in range(4): A = A + 7 A =", "i in range(6): A = A + 1 for i in range(9): for", "A = A + 8 for i in range(7): A = A +", "A = A + 1 A = A + 2 for i in", "A + 5 A = A + 9 for i in range(5): A", "8 A = A + 1 A = A + 6 A =", "A + 6 for i in range(3): A = A + 7 for", "+ 7 A = A + 4 for i in range(4): for i", "+ 4 A = A + 3 for i in range(4): A =", "A = A + 5 A = A + 8 A = A", "range(2): A = A + 3 A = A + 9 A =", "in range(3): A = A + 3 for i in range(8): for i", "for i in range(3): A = A + 1 for i in range(1):", "range(8): A = A + 9 for i in range(5): A = A", "9 for i in range(8): A = A + 7 for i in", "A = A + 7 A = A + 5 A = A", "1 A = A + 4 for i in range(8): A = A", "for i in range(3): A = A + 5 A = A +", "8 A = A + 8 for i in range(3): A = A", "A + 8 for i in range(6): A = A + 5 for", "3 A = A + 2 A = A + 6 A =", "A + 4 for i in range(8): A = A + 9 for", "A + 5 for i in range(8): for i in range(2): A =", "i in range(3): A = A + 1 for i in range(1): for", "range(5): A = A + 1 A = A + 1 for i", "A + 5 A = A + 1 for i in range(6): A", "A + 1 for i in range(9): for i in range(5): A =", "range(8): A = A + 6 A = A + 1 A =", "i in range(3): A = A + 1 for i in range(1): A", "A + 6 for i in range(2): A = A + 1 A", "+ 3 for i in range(2): A = A + 5 for i", "+ 2 A = A + 7 A = A + 5 A", "+ 9 for i in range(7): A = A + 2 for i", "6 A = A + 7 A = A + 6 A =", "6 for i in range(9): A = A + 4 A = A", "in range(6): A = A + 9 A = A + 1 A", "in range(1): for i in range(7): for i in range(2): A = A", "i in range(9): 
A = A + 1 A = A + 7", "+ 7 A = A + 5 A = A + 8 A", "+ 5 A = A + 2 A = A + 8 A", "= A + 3 A = A + 3 for i in range(5):", "5 A = A + 5 for i in range(2): for i in", "range(4): A = A + 2 for i in range(2): A = A", "in range(9): for i in range(3): A = A + 4 for i", "A = A + 1 A = A + 9 A = A", "= A + 3 A = A + 4 A = A +", "in range(8): A = A + 5 for i in range(1): A =", "for i in range(1): A = A + 5 A = A +", "+ 9 A = A + 1 for i in range(1): A =", "A + 2 for i in range(8): for i in range(6): A =", "i in range(4): A = A + 6 A = A + 8", "7 for i in range(7): for i in range(4): A = A +", "in range(5): A = A + 9 for i in range(8): A =", "in range(1): A = A + 7 A = A + 1 for", "+ 3 A = A + 5 for i in range(6): A =", "A + 4 A = A + 1 for i in range(6): A", "A = A + 5 for i in range(8): for i in range(2):", "+ 2 for i in range(3): A = A + 4 A =", "for i in range(5): A = A + 3 A = A +", "A + 6 for i in range(3): for i in range(4): A =", "in range(5): A = A + 7 for i in range(5): A =", "A = A + 5 for i in range(5): A = A +", "A + 9 A = A + 2 A = A + 5", "9 for i in range(1): A = A + 6 A = A", "6 for i in range(2): A = A + 3 A = A", "A = A + 6 for i in range(1): for i in range(2):", "for i in range(4): for i in range(1): A = A + 7", "+ 8 A = A + 2 A = A + 1 A", "in range(4): A = A + 9 A = A + 4 for", "A = A + 4 A = A + 6 for i in", "A + 1 for i in range(4): for i in range(1): A =", "6 for i in range(4): for i in range(2): A = A +", "i in range(1): A = A + 4 for i in range(9): A", "A = A + 1 A = A + 1 for i in", "i in range(9): A = A + 7 A = A + 8", "= A + 7 A = A + 2 for i in range(2):", "+ 4 A = A + 3 A = A + 3 for", "+ 7 A = A + 5 A = A + 2 for", "A + 6 A = A + 2 for i in range(3): A", "2 for i in range(1): A = A + 2 A = A", "7 A = A + 2 for i in range(7): for i in", "9 A = A + 2 A = A + 5 for i", "for i in range(7): A = A + 8 for i in range(7):", "for i in range(9): for i in range(3): A = A + 7", "A + 5 A = A + 6 A = A + 2", "3 for i in range(1): for i in range(1): A = A +", 
"+ 2 A = A + 5 for i in range(1): A =", "A + 1 A = A + 4 for i in range(1): for", "7 for i in range(1): for i in range(1): for i in range(1):", "6 A = A + 5 A = A + 9 for i", "+ 6 A = A + 1 A = A + 3 A", "= A + 8 for i in range(4): A = A + 7", "+ 8 A = A + 4 A = A + 7 A", "+ 8 A = A + 2 A = A + 3 for", "5 for i in range(3): for i in range(5): A = A +", "+ 8 for i in range(4): A = A + 6 A =", "A + 6 A = A + 6 for i in range(1): A", "+ 4 A = A + 3 A = A + 6 for", "A + 9 A = A + 5 for i in range(9): for", "4 for i in range(6): A = A + 9 for i in", "for i in range(3): A = A + 9 for i in range(1):", "in range(5): A = A + 2 A = A + 5 A", "A = A + 8 A = A + 5 for i in", "in range(8): for i in range(8): A = A + 4 A =", "5 A = A + 6 A = A + 2 for i", "5 for i in range(9): for i in range(3): A = A +", "A + 9 A = A + 4 for i in range(9): A", "A = A + 3 A = A + 1 A = A", "7 A = A + 1 for i in range(2): A = A", "= A + 3 A = A + 9 for i in range(6):", "6 A = A + 5 for i in range(7): A = A", "= A + 6 A = A + 7 A = A +", "+ 2 for i in range(5): A = A + 8 for i", "range(5): A = A + 6 A = A + 9 A =", "+ 7 for i in range(8): A = A + 2 for i", "8 A = A + 7 A = A + 4 A =", "range(5): A = A + 8 for i in range(3): A = A", "+ 2 A = A + 8 for i in range(2): A =", "= A + 5 A = A + 8 for i in range(2):", "A + 5 for i in range(4): A = A + 2 A", "+ 7 A = A + 1 for i in range(6): A =", "= A + 4 for i in range(6): A = A + 9", "i in range(2): A = A + 5 A = A + 7", "7 A = A + 1 A = A + 9 for i", "+ 7 A = A + 6 A = A + 7 for", "5 for i in range(3): A = A + 9 for i in", "A = A + 9 for i in range(5): A = A +", "= A + 6 A = A + 2 for i in range(1):", "for i in range(8): A = A + 4 A = A +", "+ 5 for i in range(3): A = A + 7 for i", "for i in range(2): A = A + 2 A = A +", "= A + 5 A = A + 5 for i in range(5):", "+ 2 for i in range(5): A = A + 4 A =", "3 for i in range(2): A = A + 2 A = A", "i in range(1): A = A + 8 A = A + 5", "A + 8 for i in range(7): A = A + 5 A", "for i in range(5): A = A + 8 A = A 
+", "in range(6): A = A + 7 for i in range(7): A =", "A = A + 1 for i in range(9): for i in range(6):", "A = A + 5 for i in range(9): A = A +", "3 A = A + 5 A = A + 5 for i", "A + 5 for i in range(2): for i in range(3): A =", "+ 6 A = A + 4 for i in range(3): A =", "in range(1): A = A + 1 for i in range(6): A =", "i in range(2): A = A + 6 A = A + 7", "+ 9 A = A + 7 for i in range(5): A =", "for i in range(7): A = A + 4 for i in range(6):", "+ 1 A = A + 5 for i in range(9): A =", "i in range(1): A = A + 4 for i in range(5): A", "for i in range(9): A = A + 1 A = A +", "3 A = A + 6 A = A + 4 for i", "in range(2): for i in range(7): A = A + 3 A =", "8 for i in range(7): A = A + 5 A = A", "2 A = A + 1 A = A + 4 A =", "i in range(9): A = A + 5 A = A + 4", "A + 3 A = A + 1 A = A + 8", "A + 7 A = A + 4 for i in range(4): for", "A + 1 A = A + 7 A = A + 6", "5 A = A + 1 for i in range(6): A = A", "= A + 2 A = A + 1 for i in range(1):", "A + 5 A = A + 4 for i in range(8): A", "A + 3 A = A + 5 for i in range(6): A", "A + 1 A = A + 3 A = A + 9", "A = A + 4 for i in range(3): for i in range(2):", "for i in range(2): A = A + 4 A = A +", "A + 3 A = A + 5 for i in range(3): A", "= A + 6 A = A + 1 A = A +", "A + 1 A = A + 6 A = A + 1", "+ 5 A = A + 6 A = A + 2 A", "+ 4 for i in range(5): A = A + 8 A =", "= A + 1 for i in range(8): A = A + 5", "7 for i in range(7): A = A + 6 for i in", "in range(5): A = A + 8 for i in range(3): A =", "A + 2 for i in range(2): A = A + 7 A", "A + 8 for i in range(7): A = A + 7 A", "for i in range(1): for i in range(2): A = A + 4", "i in range(6): A = A + 4 A = A + 4", "+ 1 A = A + 6 A = A + 1 A", "A + 4 for i in range(9): for i in range(3): A =", "= A + 7 A = A + 9 A = A +", "5 A = A + 6 for i in range(1): A = A", "+ 3 for i in range(1): A = A + 2 for i", "2 A = A + 5 A = A + 8 A =", "= A + 8 for i in range(4): A = A + 6", "+ 4 A = A + 4 A = A + 8 for", "= A + 4 A = A + 5 A = A +", "A = A + 4 for i in range(1): A = A +", "+ 9 A = A + 5 for i in 
range(3): A =", "3 for i in range(1): A = A + 2 A = A", "+ 3 for i in range(4): A = A + 1 A =", "+ 1 A = A + 3 A = A + 2 A", "A + 7 A = A + 6 A = A + 7", "+ 6 A = A + 5 A = A + 5 A", "+ 5 A = A + 8 A = A + 2 A", "3 A = A + 3 A = A + 1 A =", "range(3): A = A + 6 for i in range(9): A = A", "6 A = A + 8 A = A + 9 A =", "in range(1): A = A + 2 for i in range(4): A =", "= A + 5 A = A + 5 A = A +", "A = A + 6 A = A + 1 for i in", "= A + 6 for i in range(3): A = A + 3", "1 for i in range(9): for i in range(7): A = A +", "= A + 7 A = A + 1 for i in range(6):", "i in range(7): A = A + 9 for i in range(9): A", "A + 6 for i in range(9): A = A + 3 A", "range(5): A = A + 5 for i in range(3): for i in", "4 A = A + 4 for i in range(3): A = A", "A + 8 for i in range(2): A = A + 4 A", "+ 2 for i in range(3): A = A + 1 for i", "A = A + 5 A = A + 5 A = A", "A + 1 A = A + 4 for i in range(8): A", "i in range(8): for i in range(8): A = A + 4 A", "+ 4 A = A + 9 A = A + 8 for", "+ 7 A = A + 6 A = A + 5 A", "+ 5 for i in range(2): A = A + 4 A =", "+ 7 A = A + 5 A = A + 5 for", "A = A + 2 for i in range(1): A = A +", "+ 1 A = A + 7 for i in range(8): A =", "A + 9 A = A + 3 for i in range(4): A", "for i in range(9): for i in range(6): A = A + 5", "A + 6 A = A + 8 for i in range(2): A", "for i in range(8): A = A + 6 for i in range(4):", "i in range(3): A = A + 2 A = A + 5", "9 A = A + 1 A = A + 9 for i", "i in range(2): A = A + 4 for i in range(5): for", "+ 4 A = A + 9 for i in range(1): A =", "+ 9 for i in range(1): A = A + 8 for i", "for i in range(9): A = A + 8 A = A +", "range(9): A = A + 7 A = A + 8 for i", "5 A = A + 7 for i in range(5): A = A", "i in range(8): A = A + 7 A = A + 8", "= A + 6 for i in range(2): A = A + 3", "+ 5 for i in range(3): A = A + 6 for i", "5 A = A + 2 for i in range(2): A = A", "A + 3 for i in range(2): A = A + 2 A", "range(1): for i in range(1): A = A + 4 A = A", "A + 8 for i in range(2): A = A + 7 A", "A = A + 7 A = A + 7 A = A", "+ 7 A = A + 8 A = A + 3 A", 
"4 A = A + 9 A = A + 8 for i", "range(6): A = A + 4 A = A + 3 A =", "1 A = A + 7 for i in range(8): A = A", "range(4): A = A + 4 A = A + 3 A =", "i in range(9): for i in range(3): A = A + 4 A", "A + 5 for i in range(1): A = A + 5 A", "i in range(6): A = A + 9 A = A + 1", "= A + 9 A = A + 7 for i in range(9):", "+ 5 A = A + 1 A = A + 6 A", "range(3): A = A + 3 for i in range(8): for i in", "in range(1): A = A + 7 for i in range(9): A =", "= A + 1 A = A + 6 for i in range(2):", "A = A + 2 A = A + 7 A = A", "A + 1 A = A + 8 for i in range(6): A", "7 A = A + 5 A = A + 5 for i", "+ 5 A = A + 5 for i in range(2): A =", "4 A = A + 3 A = A + 6 for i", "for i in range(7): A = A + 2 A = A +", "in range(7): A = A + 3 for i in range(5): A =", "i in range(4): A = A + 8 A = A + 6", "A + 1 for i in range(9): for i in range(6): A =", "A + 2 A = A + 9 A = A + 6", "1 for i in range(6): A = A + 2 A = A", "A + 8 A = A + 7 A = A + 8", "A + 8 for i in range(1): A = A + 6 A", "A + 4 for i in range(3): for i in range(2): A =", "1 for i in range(9): for i in range(5): A = A +", "for i in range(6): A = A + 1 for i in range(9):", "A + 2 A = A + 1 A = A + 4", "range(1): A = A + 5 A = A + 7 A =", "+ 7 for i in range(1): for i in range(1): for i in", "i in range(3): A = A + 1 A = A + 1", "= A + 7 for i in range(3): A = A + 6", "4 for i in range(3): A = A + 7 A = A", "= A + 3 for i in range(2): A = A + 5", "in range(5): A = A + 4 for i in range(4): A =", "+ 6 A = A + 1 A = A + 5 A", "A + 3 A = A + 6 for i in range(6): A", "for i in range(5): A = A + 5 A = A +", "7 A = A + 2 A = A + 1 A =", "A + 1 for i in range(1): A = A + 8 A", "= A + 4 A = A + 4 A = A +", "i in range(3): A = A + 3 A = A + 7", "A + 2 A = A + 2 A = A + 7", "A + 3 A = A + 5 A = A + 2", "9 A = A + 6 for i in range(4): for i in", "= A + 9 for i in range(9): A = A + 4", "+ 2 A = A + 8 A = A + 2 A", "+ 4 for i in range(2): A = A + 7 for i", "in range(2): A = A + 1 A = A + 5 A", "A + 3 A = A + 2 A = A + 1", "A = A + 7 A = A + 4 for i 
in", "A + 3 A = A + 9 for i in range(4): for", "A + 4 A = A + 5 for i in range(3): A", "+ 3 A = A + 4 A = A + 5 for", "+ 2 for i in range(2): A = A + 3 for i", "range(4): A = A + 5 A = A + 5 A =", "range(4): A = A + 7 A = A + 9 A =", "i in range(1): A = A + 6 A = A + 4", "A + 3 A = A + 2 for i in range(6): A", "A + 2 A = A + 2 A = A + 1", "A + 7 for i in range(4): A = A + 2 A", "+ 9 A = A + 4 A = A + 7 for", "= A + 9 for i in range(2): for i in range(6): A", "range(5): A = A + 3 A = A + 6 for i", "+ 7 A = A + 1 A = A + 8 for", "3 A = A + 1 A = A + 8 A =", "4 for i in range(5): for i in range(6): A = A +", "for i in range(3): A = A + 1 A = A +", "6 for i in range(9): A = A + 1 A = A", "A = A + 2 A = A + 8 A = A", "A + 4 for i in range(2): A = A + 3 for", "+ 5 for i in range(1): A = A + 6 for i", "A + 7 A = A + 7 A = A + 5", "+ 1 for i in range(8): A = A + 5 for i", "= A + 5 A = A + 7 for i in range(5):", "3 A = A + 7 for i in range(5): for i in", "= A + 9 A = A + 5 for i in range(2):", "+ 3 for i in range(6): for i in range(1): A = A", "in range(1): A = A + 4 for i in range(3): A =", "range(2): A = A + 4 A = A + 7 for i", "A = A + 7 A = A + 6 A = A", "+ 5 A = A + 7 for i in range(5): A =", "in range(1): A = A + 8 A = A + 5 A", "A = A + 2 for i in range(2): A = A +", "+ 4 A = A + 7 for i in range(3): A =", "in range(7): for i in range(8): for i in range(6): A = A", "+ 1 A = A + 1 A = A + 5 for", "+ 6 A = A + 8 for i in range(1): A =", "= A + 2 A = A + 5 A = A +", "in range(7): A = A + 3 A = A + 5 for", "+ 7 A = A + 8 for i in range(4): A =", "A + 2 A = A + 5 for i in range(8): for", "i in range(8): A = A + 7 for i in range(6): A", "range(8): for i in range(5): A = A + 9 A = A", "A + 5 A = A + 5 A = A + 6", "A + 7 for i in range(8): A = A + 2 for", "i in range(4): A = A + 8 for i in range(7): A", "5 for i in range(7): A = A + 5 for i in", "i in range(6): A = A + 9 for i in range(5): A", "= A + 8 for i in range(7): A = A + 5", "5 A = A + 6 for i in range(7): for i in", 
"i in range(2): A = A + 3 for i in range(5): A", "A + 2 for i in range(8): A = A + 9 for", "i in range(6): A = A + 9 A = A + 3", "+ 4 A = A + 8 A = A + 4 for", "+ 6 for i in range(1): A = A + 7 A =", "7 A = A + 7 for i in range(7): for i in", "= A + 5 for i in range(7): A = A + 8", "range(2): A = A + 4 A = A + 9 A =", "in range(1): for i in range(2): A = A + 4 A =", "= A + 6 A = A + 6 A = A +", "A + 5 A = A + 6 for i in range(7): for", "1 A = A + 4 for i in range(6): A = A", "1 A = A + 8 for i in range(6): A = A", "= A + 4 for i in range(6): A = A + 2", "in range(7): A = A + 8 for i in range(7): A =", "in range(2): A = A + 6 for i in range(1): for i", "range($1) A = 0 for i in range(2): A = A + 4", "A = A + 3 for i in range(5): A = A +", "+ 4 for i in range(4): A = A + 6 A =", "5 A = A + 6 for i in range(2): A = A", "+ 3 for i in range(2): A = A + 2 A =", "2 A = A + 1 A = A + 8 A =", "in range(5): A = A + 9 A = A + 8 A", "i in range(9): A = A + 5 for i in range(1): A", "A + 7 for i in range(7): A = A + 2 A", "for i in range(3): A = A + 7 for i in range(9):", "8 A = A + 4 A = A + 7 A =", "+ 6 A = A + 2 for i in range(5): A =", "for i in range(6): for i in range(2): A = A + 1", "in range(9): for i in range(3): A = A + 7 A =", "range(4): A = A + 8 A = A + 6 A =", "A + 9 for i in range(5): A = A + 2 A", "= A + 6 A = A + 2 for i in range(3):", "= A + 5 for i in range(6): A = A + 3", "i in range(5): A = A + 3 A = A + 6", "A + 3 for i in range(3): A = A + 2 A", "+ 8 A = A + 3 for i in range(8): A =", "in range(6): A = A + 3 for i in range(4): A =", "A + 4 for i in range(5): for i in range(6): A =", "A + 6 for i in range(2): A = A + 3 A", "+ 4 A = A + 2 A = A + 8 A", "+ 5 for i in range(1): for i in range(8): A = A", "range(1): A = A + 2 for i in range(7): A = A", "in range(4): A = A + 6 A = A + 8 for", "= A + 6 A = A + 8 for i in range(6):", "i in range(1): A = A + 6 A = A + 6", "i in range(8): A = A + 2 for i in range(8): for", "in range(4): A = A + 5 A = A + 5 A", "+ 2 
A = A + 1 A = A + 4 for", "A = A + 4 A = A + 5 A = A", "= A + 2 A = A + 9 A = A +", "5 A = A + 5 A = A + 4 for i", "i in range(5): A = A + 8 for i in range(3): A", "= A + 8 for i in range(2): A = A + 6", "= A + 5 for i in range(1): A = A + 4", "= A + 9 A = A + 1 for i in range(6):", "for i in range(7): for i in range(2): A = A + 3", "in range(9): for i in range(7): A = A + 3 for i", "4 for i in range(8): for i in range(7): A = A +", "range(1): A = A + 9 A = A + 3 for i", "A + 5 for i in range(2): A = A + 7 for", "A + 3 for i in range(4): A = A + 5 A", "A = A + 3 for i in range(5): for i in range(6):", "i in range(9): A = A + 6 for i in range(1): for", "A = A + 8 for i in range(8): for i in range(5):", "6 A = A + 8 for i in range(2): A = A", "A = A + 2 for i in range(9): A = A +", "8 for i in range(4): A = A + 2 for i in", "9 for i in range(5): A = A + 9 A = A", "A + 7 A = A + 1 A = A + 8", "in range(9): A = A + 6 for i in range(1): for i", "for i in range(7): for i in range(5): A = A + 7", "= A + 2 for i in range(7): for i in range(4): A", "for i in range(4): for i in range(1): A = A + 6", "A + 9 A = A + 6 for i in range(9): A", "for i in range(2): A = A + 6 for i in range(6):", "+ 8 for i in range(7): A = A + 8 for i", "A = A + 7 A = A + 3 A = A", "range(3): A = A + 7 for i in range(3): A = A", "for i in range(2): A = A + 7 for i in range(1):", "i in range(9): for i in range(6): A = A + 4 A", "2 for i in range(5): A = A + 4 A = A", "i in range(8): for i in range(1): A = A + 7 A", "A + 8 A = A + 3 A = A + 1", "range(9): A = A + 4 A = A + 9 A =", "5 for i in range(2): A = A + 7 for i in", "+ 7 for i in range(6): A = A + 4 A =", "7 A = A + 5 A = A + 8 A =", "4 A = A + 5 for i in range(3): A = A", "7 A = A + 7 A = A + 7 for i", "range(6): A = A + 4 A = A + 4 A =", "range(1): A = A + 8 A = A + 5 A =", "A + 7 A = A + 7 for i in range(5): A", "A = A + 2 A = A + 6 A = A", "i in range(4): for i in range(2): A = A + 3 for", "i in range(5): A = A + 5 A = A + 8", "i in 
range(6): A = A + 9 A = A + 2", "A + 3 for i in range(2): A = A + 4 for", "+ 7 for i in range(9): for i in range(6): A = A", "i in range(1): A = A + 4 A = A + 2", "= A + 5 A = A + 6 A = A +", "A + 1 A = A + 5 A = A + 2", "A = A + 8 A = A + 8 A = A", "= A + 7 for i in range(9): A = A + 2", "A + 5 for i in range(8): A = A + 6 for", "3 A = A + 9 A = A + 5 A =", "5 for i in range(2): A = A + 4 A = A", "= A + 9 A = A + 3 for i in range(4):", "i in range(8): for i in range(6): A = A + 3 for", "A + 9 A = A + 3 A = A + 2", "+ 3 for i in range(5): A = A + 9 for i", "+ 8 A = A + 3 A = A + 1 A", "6 for i in range(9): A = A + 6 for i in", "= A + 8 for i in range(2): A = A + 4", "9 A = A + 5 A = A + 3 A =", "i in range(9): A = A + 2 A = A + 7", "A + 8 A = A + 4 A = A + 7", "= A + 3 for i in range(3): A = A + 2", "range(9): A = A + 9 A = A + 8 for i", "A + 9 A = A + 5 A = A + 3", "A = A + 9 A = A + 1 A = A", "in range(9): A = A + 9 A = A + 8 for", "A + 1 A = A + 7 A = A + 7", "= A + 2 for i in range(2): A = A + 7", "2 A = A + 4 A = A + 3 for i", "A + 4 A = A + 4 for i in range(3): A", "3 for i in range(4): A = A + 5 A = A", "+ 4 for i in range(6): A = A + 6 for i", "range(1): A = A + 6 A = A + 4 A =", "+ 6 A = A + 2 for i in range(1): A =", "i in range(6): A = A + 3 for i in range(9): A", "+ 7 A = A + 1 A = A + 9 for", "+ 8 A = A + 7 A = A + 2 for", "i in range(8): A = A + 2 A = A + 5", "A + 7 for i in range(4): for i in range(7): A =", "+ 6 A = A + 8 for i in range(6): A =", "9 A = A + 2 for i in range(8): A = A", "in range(2): A = A + 3 for i in range(8): A =", "= A + 3 A = A + 6 for i in range(9):", "in range(6): A = A + 4 A = A + 5 for", "i in range(9): A = A + 8 A = A + 9", "= A + 6 A = A + 3 for i in range(7):", "A + 4 for i in range(8): for i in range(7): A =", "+ 7 for i in range(8): A = A + 6 A =", "+ 3 for i in range(5): A = A + 3 for i", "+ 6 A = A + 3 for i in range(5): for i", "i in range(9): A = A + 3 for i in range(9): A", "A + 8 A = A + 5 for i in range(2): A", 
"+ 2 A = A + 1 A = A + 8 A", "A + 7 for i in range(7): A = A + 6 for", "i in range(6): A = A + 5 A = A + 7", "4 A = A + 1 for i in range(9): A = A", "A + 2 for i in range(9): A = A + 5 for", "+ 3 A = A + 1 A = A + 2 A", "for i in range(1): A = A + 8 A = A +", "1 A = A + 5 for i in range(2): A = A", "8 for i in range(1): A = A + 6 A = A", "= A + 2 for i in range(6): A = A + 3", "in range(8): A = A + 8 A = A + 1 A", "6 for i in range(5): A = A + 8 A = A", "range(3): for i in range(4): A = A + 9 for i in", "for i in range(2): A = A + 3 for i in range(8):", "i in range(6): A = A + 1 A = A + 6", "= A + 8 for i in range(9): A = A + 8", "in range(3): A = A + 5 A = A + 5 A", "+ 5 for i in range(3): A = A + 9 for i", "+ 1 A = A + 3 A = A + 9 for", "range(3): A = A + 3 A = A + 4 A =", "+ 6 A = A + 9 for i in range(1): A =", "2 for i in range(6): for i in range(1): A = A +", "A + 5 for i in range(9): for i in range(3): A =", "for i in range(5): A = A + 2 A = A +", "1 A = A + 6 for i in range(2): A = A", "for i in range(1): for i in range(1): A = A + 1", "in range(5): A = A + 6 A = A + 2 for", "+ 7 for i in range(7): A = A + 6 for i", "i in range(1): A = A + 7 A = A + 8", "+ 2 A = A + 4 for i in range(5): for i", "range(3): for i in range(6): A = A + 8 A = A", "range(5): A = A + 7 A = A + 5 A =", "6 A = A + 4 A = A + 6 for i", "i in range(1): for i in range(4): A = A + 6 A", "= A + 8 A = A + 1 A = A +", "A = A + 4 for i in range(7): A = A +", "A + 7 A = A + 5 A = A + 5", "in range(1): for i in range(1): for i in range(1): A = A", "= A + 7 for i in range(3): A = A + 4", "for i in range(7): for i in range(7): for i in range(5): A", "in range(9): A = A + 6 A = A + 8 A", "A = A + 1 A = A + 9 for i in", "4 A = A + 4 A = A + 2 A =", "+ 8 A = A + 9 A = A + 5 for", "i in range(5): A = A + 1 for i in range(8): A", "+ 8 A = A + 1 A = A + 6 A", "+ 4 for i in range(2): A = A + 9 for i", "2 for i in range(7): A = A + 5 A = A", "= A + 1 A = A + 6 A = A +", "+ 8 A = A + 4 for i in range(1): 
A =", "for i in range(3): A = A + 3 for i in range(8):", "6 A = A + 4 A = A + 3 A =", "A + 8 for i in range(4): A = A + 7 A", "9 for i in range(7): A = A + 2 for i in", "3 for i in range(2): for i in range(8): A = A +", "in range(3): A = A + 5 A = A + 3 A", "7 A = A + 8 for i in range(4): A = A", "+ 3 A = A + 5 A = A + 5 for", "+ 9 for i in range(1): A = A + 4 for i", "A + 3 for i in range(9): A = A + 2 A", "for i in range(1): for i in range(4): A = A + 6", "2 A = A + 6 A = A + 8 A =", "A + 9 A = A + 8 for i in range(5): A", "A = A + 3 A = A + 3 A = A", "A = A + 8 A = A + 3 for i in", "A + 3 for i in range(8): A = A + 2 A", "range(3): A = A + 1 A = A + 1 for i", "A = A + 9 for i in range(4): for i in range(1):", "for i in range(6): A = A + 5 A = A +", "4 A = A + 4 for i in range(8): A = A", "+ 9 A = A + 6 A = A + 3 for", "A + 6 A = A + 8 for i in range(6): A", "+ 8 A = A + 1 A = A + 4 for", "for i in range(1): A = A + 3 A = A +", "+ 6 A = A + 1 A = A + 7 for", "5 A = A + 3 A = A + 3 A =", "+ 4 for i in range(9): A = A + 2 A =", "5 for i in range(1): A = A + 8 A = A", "range(7): A = A + 9 for i in range(9): A = A", "+ 4 for i in range(8): A = A + 9 A =", "range(3): A = A + 1 A = A + 8 for i", "range(2): A = A + 3 for i in range(3): A = A", "+ 8 for i in range(8): for i in range(5): A = A", "A + 4 A = A + 5 A = A + 8", "A + 7 A = A + 8 A = A + 5", "A + 3 for i in range(4): A = A + 4 A", "A = A + 6 A = A + 1 A = A", "= A + 3 A = A + 6 for i in range(5):", "4 A = A + 3 for i in range(6): for i in", "+ 1 for i in range(1): A = A + 1 for i", "A + 6 A = A + 4 for i in range(9): A", "= A + 6 A = A + 2 for i in range(9):", "for i in range(3): A = A + 5 for i in range(9):", "A + 2 A = A + 4 A = A + 7", "range(1): A = A + 7 A = A + 8 A =", "A + 8 for i in range(4): for i in range(4): A =", "+ 4 for i in range(6): A = A + 6 A =", "A + 8 for i in range(4): A = A + 6 A", "A = A + 9 A = A + 4 for i in", "A + 1 for i in range(5): A = A + 6 for", "1 A = A + 3 A = A + 2 A =", "i in 
range(1): A = A + 2 for i in range(4): A", "range(9): A = A + 1 A = A + 7 for i", "in range(2): A = A + 5 A = A + 1 A", "+ 4 for i in range(8): for i in range(7): A = A", "4 for i in range(3): A = A + 1 A = A", "i in range(9): A = A + 9 A = A + 2", "range(3): A = A + 2 A = A + 9 A =", "5 A = A + 2 A = A + 4 A =", "range(4): for i in range(9): A = A + 2 for i in", "A + 7 A = A + 9 A = A + 8", "A = A + 7 for i in range(7): for i in range(7):", "A = A + 5 A = A + 9 for i in", "in range(6): A = A + 4 A = A + 3 A", "= A + 3 A = A + 2 A = A +", "= A + 2 for i in range(7): A = A + 5", "7 for i in range(7): A = A + 3 A = A", "in range(9): for i in range(6): A = A + 4 A =", "5 for i in range(2): for i in range(3): A = A +", "A = A + 6 A = A + 2 for i in", "A + 2 for i in range(5): A = A + 4 A", "in range(8): for i in range(6): A = A + 1 A =", "A + 3 for i in range(2): A = A + 5 A", "A = A + 6 A = A + 8 A = A", "+ 5 for i in range(1): A = A + 5 A =", "range(3): A = A + 1 for i in range(1): A = A", "A + 7 for i in range(9): for i in range(6): A =", "for i in range(4): A = A + 1 A = A +", "for i in range(3): A = A + 6 for i in range(5):", "range(6): for i in range(2): A = A + 1 A = A", "7 A = A + 2 for i in range(3): A = A", "5 for i in range(6): A = A + 3 for i in", "= A + 7 for i in range(8): A = A + 6", "A + 6 A = A + 5 A = A + 9", "in range(4): A = A + 6 A = A + 6 A", "+ 3 A = A + 5 for i in range(7): A =", "A + 8 A = A + 3 for i in range(2): A", "range(2): A = A + 7 for i in range(8): A = A", "7 A = A + 7 A = A + 6 A =", "= A + 8 for i in range(9): A = A + 5", "i in range(7): A = A + 7 A = A + 7", "= A + 1 A = A + 8 for i in range(7):", "range(4): A = A + 6 A = A + 6 A =", "7 A = A + 7 A = A + 3 A =", "for i in range(8): for i in range(6): A = A + 3", "range(1): for i in range(2): A = A + 2 for i in", "A + 5 A = A + 2 for i in range(7): for", "in range(1): A = A + 2 for i in range(5): A =", "4 A = A + 5 A = A + 8 for i", "A + 9 A = A + 7 for i in range(3): A", "+ 9 A = A 
+ 7 for i in range(3): A =", "= A + 3 A = A + 9 for i in range(4):", "= A + 1 A = A + 8 for i in range(6):", "A = A + 3 for i in range(9): A = A +", "in range(9): for i in range(3): A = A + 4 A =", "+ 2 A = A + 2 A = A + 8 for", "i in range(1): A = A + 4 A = A + 6", "for i in range(1): A = A + 4 for i in range(9):", "A = A + 3 A = A + 5 for i in", "+ 1 A = A + 4 for i in range(1): for i", "7 for i in range(1): A = A + 8 A = A", "9 A = A + 4 A = A + 7 for i", "+ 4 A = A + 4 for i in range(8): A =", "3 A = A + 5 A = A + 2 A =", "in range(1): A = A + 4 A = A + 6 for", "A + 2 for i in range(2): A = A + 4 for", "= A + 2 for i in range(9): for i in range(5): A", "in range(2): A = A + 2 A = A + 5 A", "+ 8 A = A + 4 for i in range(5): for i", "i in range(1): A = A + 7 for i in range(9): A", "i in range(9): A = A + 5 A = A + 5", "i in range(3): A = A + 3 A = A + 9", "A = A + 9 A = A + 6 for i in", "+ 8 for i in range(4): A = A + 2 for i", "9 for i in range(3): A = A + 5 A = A", "= A + 6 A = A + 7 for i in range(4):", "for i in range(8): for i in range(2): A = A + 6", "= A + 5 A = A + 2 for i in range(7):", "i in range(9): A = A + 1 A = A + 9", "= A + 7 A = A + 3 for i in range(2):", "A + 7 for i in range(7): A = A + 3 A", "for i in range(4): A = A + 9 for i in range(5):", "= A + 4 for i in range(4): for i in range(9): A", "A + 7 for i in range(9): A = A + 2 A", "= A + 5 A = A + 4 A = A +", "A + 4 for i in range(4): for i in range(4): A =", "in range(8): A = A + 9 A = A + 1 for", "0 for i in range(2): A = A + 4 for i in", "= A + 8 for i in range(8): for i in range(5): A", "in range(4): A = A + 8 for i in range(7): A =", "A + 4 A = A + 8 A = A + 4", "9 for i in range(5): A = A + 5 A = A", "A + 6 A = A + 5 for i in range(7): A", "for i in range(1): A = A + 6 for i in range(2):", "2 A = A + 2 A = A + 1 for i", "A + 1 A = A + 8 A = A + 3", "i in range(3): A = A + 7 A = A + 1", "for i in range(9): for i in range(3): A = A + 4", "in range(2): A = A + 9 for i in range(1): A =", "4 
for i in range(1): A = A + 5 for i in", "for i in range(1): A = A + 4 A = A +", "A + 9 for i in range(5): A = A + 5 for", "in range(7): A = A + 5 for i in range(3): A =", "+ 2 for i in range(1): A = A + 4 for i", "8 A = A + 8 A = A + 1 A =", "= A + 7 for i in range(5): A = A + 3", "A = A + 2 for i in range(9): for i in range(3):", "i in range(9): A = A + 1 A = A + 3", "range(2): A = A + 6 for i in range(4): A = A", "range(4): for i in range(4): A = A + 8 for i in", "= A + 9 A = A + 3 for i in range(1):", "A = A + 2 for i in range(7): A = A +", "in range(1): A = A + 1 for i in range(8): A =", "7 for i in range(4): A = A + 7 A = A", "A + 2 for i in range(3): A = A + 1 for", "i in range(2): for i in range(7): A = A + 3 A", "range(2): A = A + 3 for i in range(8): A = A", "A + 9 A = A + 2 for i in range(8): A", "8 A = A + 4 for i in range(1): A = A", "2 A = A + 7 A = A + 5 A =", "+ 4 A = A + 2 A = A + 8 for", "i in range(2): A = A + 9 for i in range(1): A", "i in range(2): A = A + 7 for i in range(1): A", "range(1): A = A + 4 A = A + 6 for i", "range(3): A = A + 1 for i in range(1): for i in", "+ 5 for i in range(2): A = A + 7 for i", "= A + 8 for i in range(2): A = A + 7", "for i in range(7): A = A + 7 A = A +", "in range(6): A = A + 5 A = A + 5 for", "A = A + 8 for i in range(8): A = A +", "A + 1 for i in range(6): A = A + 1 for", "5 A = A + 8 A = A + 8 A =", "5 A = A + 5 A = A + 6 A =", "range(2): A = A + 7 for i in range(9): A = A", "i in range(9): A = A + 4 A = A + 1", "= A + 7 A = A + 2 for i in range(7):", "A + 6 A = A + 3 for i in range(7): A", "for i in range(4): for i in range(4): A = A + 1", "in range(5): A = A + 2 A = A + 1 for", "5 for i in range(3): A = A + 1 for i in", "i in range(5): A = A + 3 for i in range(6): for", "in range(3): A = A + 4 A = A + 4 A", "in range(1): A = A + 2 for i in range(7): A =", "A + 7 A = A + 4 for i in range(8): A", "i in range(1): for i in range(1): A = A + 4 A", "+ 8 A = A + 8 A = A + 1 A", "+ 4 for i in range(1): for i in 
range(7): for i in", "3 A = A + 6 for i in range(6): A = A", "1 A = A + 3 for i in range(5): for i in", "i in range(1): A = A + 6 A = A + 8", "= A + 1 for i in range(2): A = A + 2", "= A + 3 A = A + 3 A = A +", "= A + 7 for i in range(5): A = A + 1", "range(9): A = A + 2 for i in range(1): A = A", "A = A + 9 for i in range(9): for i in range(2):", "5 A = A + 6 A = A + 2 A =", "in range(3): A = A + 7 for i in range(3): A =", "for i in range(6): A = A + 6 A = A +", "i in range(5): A = A + 7 A = A + 7", "for i in range(7): A = A + 5 A = A +", "A = A + 5 A = A + 5 for i in", "= A + 9 A = A + 1 for i in range(4):", "3 for i in range(2): A = A + 5 A = A", "range(2): for i in range(8): A = A + 5 A = A", "A + 3 for i in range(1): A = A + 2 for", "+ 2 for i in range(5): A = A + 7 A =", "A + 2 for i in range(9): for i in range(5): A =", "+ 8 A = A + 2 A = A + 5 for", "+ 7 A = A + 2 for i in range(3): A =", "+ 6 A = A + 1 A = A + 2 for", "8 A = A + 3 A = A + 6 A =", "A + 6 A = A + 1 A = A + 5", "range(3): for i in range(2): A = A + 1 A = A", "= A + 2 for i in range(8): A = A + 9", "6 A = A + 1 A = A + 7 for i", "+ 9 A = A + 6 A = A + 2 for", "2 A = A + 8 A = A + 3 A =", "A = A + 4 for i in range(9): A = A +", "4 for i in range(4): for i in range(4): A = A +", "in range(6): A = A + 5 A = A + 1 for", "= A + 5 A = A + 6 for i in range(7):", "A + 1 A = A + 2 for i in range(6): A", "+ 6 A = A + 9 A = A + 2 for", "A + 5 A = A + 8 A = A + 8", "A = A + 4 A = A + 4 A = A", "A = A + 8 A = A + 6 A = A", "4 for i in range(4): A = A + 6 A = A", "in range(5): A = A + 1 for i in range(8): A =", "= A + 5 for i in range(2): for i in range(3): A", "+ 4 A = A + 6 for i in range(1): A =", "A = A + 7 for i in range(8): for i in range(1):", "i in range(2): A = A + 4 A = A + 1", "range(5): A = A + 9 A = A + 3 for i", "= A + 4 A = A + 9 A = A +", "+ 4 for i in range(3): A = A + 7 A =", "A + 2 for i in range(9): for i in range(3): A =", "A + 7 for i in range(5): A = A + 7 A", "= A + 3 A = A + 5 A = A 
+", "+ 9 for i in range(3): A = A + 5 A =", "+ 6 for i in range(2): A = A + 4 A =", "= A + 8 for i in range(1): A = A + 6", "= A + 1 A = A + 7 for i in range(1):", "+ 1 for i in range(6): A = A + 1 for i", "in range(9): for i in range(5): A = A + 9 A =", "3 A = A + 5 for i in range(7): A = A", "A + 9 A = A + 8 for i in range(1): A", "A + 7 A = A + 2 for i in range(3): A", "range(8): for i in range(2): A = A + 6 A = A", "+ 3 for i in range(8): A = A + 7 A =", "5 A = A + 2 for i in range(7): for i in", "A + 7 A = A + 7 A = A + 7", "+ 8 for i in range(3): A = A + 2 A =", "= A + 6 A = A + 8 for i in range(2):", "in range(3): A = A + 4 A = A + 3 A", "A + 7 for i in range(8): for i in range(1): A =", "i in range(8): for i in range(6): A = A + 1 A", "i in range(1): A = A + 5 A = A + 7", "= A + 2 for i in range(3): A = A + 3", "+ 9 A = A + 3 for i in range(2): A =", "5 A = A + 1 A = A + 6 A =", "+ 7 A = A + 9 A = A + 2 for", "A = A + 9 for i in range(7): A = A +", "5 for i in range(5): A = A + 5 for i in", "i in range(4): A = A + 6 A = A + 3", "= A + 5 for i in range(8): A = A + 6", "A = A + 1 A = A + 4 for i in", "1 for i in range(2): A = A + 2 A = A", "range(9): A = A + 1 A = A + 3 A =", "for i in range(7): A = A + 8 A = A +", "i in range(3): for i in range(2): A = A + 1 A", "A + 3 A = A + 9 A = A + 7", "+ 5 for i in range(4): A = A + 7 A =", "+ 9 for i in range(6): A = A + 9 A =", "A + 6 A = A + 3 for i in range(5): for", "A = A + 1 A = A + 6 A = A", "i in range(4): A = A + 8 A = A + 7", "A + 3 for i in range(3): A = A + 3 A", "A + 9 A = A + 5 for i in range(2): A", "+ 4 for i in range(1): A = A + 5 for i", "A + 5 A = A + 2 A = A + 2", "for i in range(9): A = A + 6 A = A +", "+ 9 A = A + 1 for i in range(8): A =", "= A + 4 A = A + 2 A = A +", "i in range(2): A = A + 5 A = A + 6", "9 for i in range(9): for i in range(2): A = A +", "A + 2 for i in range(1): A = A + 6 A", "range(7): A = A + 2 for i in range(5): A = A", "+ 1 A = A + 9 A = A + 5 for", "= A + 8 A = A + 2 A = 
A +", "4 for i in range(9): A = A + 4 A = A", "= A + 5 for i in range(2): A = A + 7", "+ 1 A = A + 5 A = A + 8 for", "range(2): A = A + 4 A = A + 1 for i", "+ 2 A = A + 8 A = A + 6 for", "A + 6 A = A + 9 A = A + 1", "+ 9 A = A + 1 for i in range(4): for i", "+ 9 A = A + 8 for i in range(4): A =", "in range(3): A = A + 1 for i in range(1): A =", "+ 1 A = A + 7 for i in range(1): A =", "+ 2 for i in range(9): for i in range(5): A = A", "i in range(3): A = A + 3 for i in range(8): for", "i in range(6): A = A + 5 for i in range(6): A", "4 A = A + 8 A = A + 4 for i", "5 A = A + 9 for i in range(5): A = A", "+ 1 A = A + 2 A = A + 4 A", "range(4): A = A + 5 A = A + 4 for i", "A + 6 for i in range(2): A = A + 4 A", "A = A + 7 A = A + 9 A = A", "in range(1): for i in range(1): A = A + 4 A =", "range(4): A = A + 8 A = A + 7 A =", "= A + 2 A = A + 7 A = A +", "A + 3 A = A + 7 for i in range(5): for", "= A + 1 for i in range(9): A = A + 9", "in range(6): A = A + 3 for i in range(9): A =", "+ 5 for i in range(7): A = A + 3 A =", "+ 7 for i in range(6): A = A + 7 for i", "+ 9 A = A + 5 A = A + 9 for", "4 A = A + 6 for i in range(7): A = A", "in range(8): A = A + 7 for i in range(6): A =", "for i in range(1): for i in range(8): A = A + 5", "i in range(1): A = A + 7 A = A + 1", "A + 1 A = A + 7 A = A + 4", "= A + 7 for i in range(7): for i in range(4): A", "+ 5 for i in range(2): A = A + 1 A =", "= A + 1 for i in range(1): A = A + 4", "for i in range(2): A = A + 6 A = A +", "= A + 4 for i in range(4): for i in range(4): A", "6 for i in range(5): A = A + 9 A = A", "A + 9 for i in range(1): A = A + 8 for", "A + 9 for i in range(1): A = A + 4 for", "+ 2 A = A + 7 for i in range(4): A =", "for i in range(5): A = A + 9 A = A +", "i in range(4): A = A + 9 A = A + 4", "in range(1): A = A + 7 A = A + 8 A", "1 A = A + 7 A = A + 6 A =", "for i in range(5): A = A + 3 for i in range(6):", "A + 6 A = A + 4 for i in range(3): A", "2 for i in range(2): A = A + 4 for i in", "+ 1 for i in 
range(9): A = A + 9 A =", "A + 7 for i in range(5): A = A + 1 A", "8 A = A + 2 for i in range(9): A = A", "= A + 4 A = A + 7 for i in range(5):", "i in range(6): A = A + 7 for i in range(7): A", "4 for i in range(6): A = A + 6 A = A", "= A + 6 for i in range(4): for i in range(2): A", "+ 9 A = A + 4 for i in range(2): A =", "+ 5 A = A + 4 for i in range(8): A =", "9 A = A + 7 for i in range(9): A = A", "for i in range(2): A = A + 7 for i in range(9):", "= A + 5 for i in range(2): A = A + 4", "range(7): A = A + 7 A = A + 7 for i", "+ 9 A = A + 6 for i in range(4): for i", "= A + 5 for i in range(1): A = A + 6", "range(5): A = A + 5 A = A + 4 A =", "in range(9): A = A + 3 A = A + 4 A", "in range(4): A = A + 2 A = A + 7 A", "for i in range(4): A = A + 5 A = A +", "+ 3 for i in range(3): A = A + 2 A =", "in range(1): A = A + 5 for i in range(7): A =", "i in range(2): A = A + 3 for i in range(3): A", "2 A = A + 9 A = A + 6 A =", "A = A + 3 A = A + 6 A = A", "+ 9 A = A + 1 A = A + 8 A", "i in range(3): A = A + 7 for i in range(9): for", "+ 8 A = A + 6 for i in range(3): for i", "in range(8): A = A + 7 A = A + 8 A", "i in range(2): A = A + 3 A = A + 5", "+ 8 A = A + 9 A = A + 3 A", "range(9): A = A + 2 A = A + 7 A =", "A + 2 A = A + 6 A = A + 1", "+ 3 A = A + 5 for i in range(3): A =", "A + 3 A = A + 7 for i in range(5): A", "1 A = A + 6 A = A + 5 A =", "= A + 7 A = A + 7 for i in range(5):", "A = A + 8 A = A + 4 A = A", "= A + 7 for i in range(8): for i in range(1): A", "for i in range(3): A = A + 3 A = A +", "in range(5): for i in range(3): for i in range(6): A = A", "A + 4 A = A + 1 for i in range(9): A", "= A + 6 for i in range(1): A = A + 9", "+ 7 for i in range(7): for i in range(7): for i in", "= A + 3 for i in range(2): A = A + 7", "+ 4 A = A + 5 for i in range(3): A =", "6 A = A + 9 A = A + 5 for i", "= A + 9 A = A + 8 for i in range(5):", "1 A = A + 9 A = A + 5 for i", "range(6): A = A + 5 A = A + 7 for i", "i in range(2): A = A + 3 for i in range(1): A", "in 
range(4): A = A + 1 A = A + 2 for", "3 for i in range(3): A = A + 2 A = A", "5 A = A + 8 for i in range(8): A = A", "A = A + 1 for i in range(9): for i in range(7):", "A + 5 for i in range(1): A = A + 2 for", "4 A = A + 3 A = A + 3 for i", "A + 9 for i in range(3): A = A + 5 A", "2 for i in range(3): A = A + 1 for i in", "A + 4 A = A + 9 A = A + 5", "3 A = A + 3 A = A + 6 for i", "for i in range(1): A = A + 8 for i in range(8):", "range(5): A = A + 4 for i in range(4): A = A", "in range(9): for i in range(7): A = A + 5 for i", "9 for i in range(9): A = A + 4 A = A", "A = A + 5 A = A + 6 A = A", "i in range(8): A = A + 9 A = A + 6", "+ 8 for i in range(4): for i in range(4): A = A", "= A + 4 for i in range(7): A = A + 8", "A + 1 A = A + 1 A = A + 3", "for i in range(2): A = A + 3 for i in range(1):", "A + 2 for i in range(5): A = A + 7 A", "A + 5 A = A + 4 A = A + 2", "in range(3): for i in range(2): A = A + 1 A =", "+ 8 A = A + 3 for i in range(5): A =", "+ 1 for i in range(2): A = A + 2 A =", "range(7): A = A + 5 for i in range(3): A = A", "+ 1 for i in range(6): A = A + 4 A =", "A + 2 A = A + 8 A = A + 3", "A = A + 9 A = A + 8 for i in", "range(7): A = A + 2 for i in range(4): A = A", "for i in range(2): for i in range(7): A = A + 3", "range(4): A = A + 7 A = A + 2 for i", "7 A = A + 1 for i in range(6): A = A", "i in range(5): A = A + 2 A = A + 1", "range(2): A = A + 6 for i in range(6): A = A", "= A + 6 A = A + 4 A = A +", "i in range(4): for i in range(7): A = A + 1 for", "= A + 9 for i in range(7): A = A + 2", "i in range(9): for i in range(2): A = A + 3 A", "A + 6 for i in range(5): for i in range(6): A =", "9 A = A + 6 A = A + 3 for i", "range(2): A = A + 6 A = A + 7 A =", "A + 9 A = A + 5 A = A + 6", "+ 1 A = A + 4 for i in range(6): A =", "3 A = A + 9 A = A + 7 for i", "2 for i in range(9): for i in range(3): A = A +", "A + 3 for i in range(5): for i in range(6): A =", "A + 4 for i in range(3): A = A + 3 A", "in range(9): A = A + 5 A = A + 5 for", "2 A = 
A + 8 for i in range(2): A = A", "for i in range(1): A = A + 3 for i in range(1):", "+ 9 A = A + 7 for i in range(9): A =", "= A + 5 A = A + 3 A = A +", "= A + 2 A = A + 8 for i in range(2):", "+ 7 A = A + 7 A = A + 3 A", "+ 7 A = A + 4 for i in range(8): A =", "= A + 5 A = A + 9 for i in range(2):", "+ 9 for i in range(3): for i in range(3): A = A", "A + 6 for i in range(7): for i in range(6): for i", "range(5): for i in range(4): A = A + 5 A = A", "A + 9 A = A + 3 for i in range(9): A", "range(5): A = A + 5 for i in range(1): A = A", "range(6): A = A + 1 A = A + 6 for i", "A + 4 for i in range(2): A = A + 7 for", "A + 5 A = A + 1 A = A + 2", "= A + 5 for i in range(2): A = A + 5", "i in range(7): A = A + 7 for i in range(7): A", "A + 1 A = A + 2 for i in range(4): A", "A = A + 5 for i in range(4): A = A +", "= A + 5 A = A + 4 for i in range(4):", "= A + 1 A = A + 5 for i in range(2):", "in range(6): A = A + 3 for i in range(2): A =", "+ 5 A = A + 4 for i in range(4): for i", "in range(7): for i in range(7): for i in range(5): A = A", "i in range(9): for i in range(5): A = A + 6 A", "7 A = A + 4 A = A + 5 A =", "in range(9): A = A + 2 A = A + 8 A", "+ 6 A = A + 1 for i in range(7): A =", "i in range(1): A = A + 8 A = A + 7", "+ 2 A = A + 2 A = A + 7 for", "= A + 3 for i in range(9): A = A + 9", "range(9): A = A + 5 A = A + 4 for i", "8 for i in range(2): A = A + 7 for i in", "3 for i in range(2): A = A + 4 for i in", "in range(2): A = A + 4 A = A + 7 for", "2 for i in range(5): A = A + 8 for i in", "range(1): A = A + 4 for i in range(3): A = A", "range(3): A = A + 3 A = A + 1 A =", "in range(1): for i in range(8): A = A + 5 for i", "= A + 5 for i in range(9): for i in range(7): A", "i in range(8): for i in range(2): A = A + 6 A", "i in range(4): A = A + 7 A = A + 2", "range(2): A = A + 6 for i in range(1): for i in", "+ 5 for i in range(3): A = A + 1 for i", "in range(9): A = A + 4 A = A + 1 for", "range(5): A = A + 9 A = A + 1 A =", "for i in range(7): A = A + 5 
for i in range(3):", "A + 3 A = A + 6 for i in range(9): A", "A = A + 9 A = A + 5 A = A", "A + 2 A = A + 1 A = A + 8", "8 A = A + 5 for i in range(1): A = A", "+ 4 for i in range(8): A = A + 5 A =", "A + 8 for i in range(6): A = A + 6 for", "in range(8): A = A + 2 A = A + 4 for", "7 for i in range(3): A = A + 9 A = A", "+ 3 A = A + 3 A = A + 1 A", "A = A + 2 A = A + 8 for i in", "range(4): for i in range(4): A = A + 1 A = A", "for i in range(4): for i in range(4): A = A + 8", "for i in range(8): A = A + 7 A = A +", "i in range(6): A = A + 7 A = A + 7", "= A + 2 for i in range(9): A = A + 5", "A = A + 5 A = A + 1 for i in", "range(9): A = A + 5 for i in range(1): A = A", "= A + 9 A = A + 2 for i in range(8):", "4 A = A + 2 A = A + 2 A =", "A + 5 for i in range(1): A = A + 4 for", "in range(7): A = A + 6 for i in range(5): A =", "A + 5 A = A + 8 for i in range(2): A", "in range(7): A = A + 2 for i in range(4): A =", "range(5): A = A + 4 A = A + 8 A =", "A = A + 8 A = A + 7 A = A", "in range(1): A = A + 2 A = A + 8 for", "range(4): for i in range(2): A = A + 3 for i in", "9 A = A + 7 for i in range(5): A = A", "2 A = A + 2 A = A + 5 for i", "range(2): A = A + 1 A = A + 5 A =", "A + 1 for i in range(1): A = A + 1 for", "A + 4 A = A + 1 A = A + 7", "5 for i in range(3): A = A + 5 for i in", "A + 6 A = A + 2 for i in range(5): A", "for i in range(1): A = A + 5 for i in range(7):", "A + 6 for i in range(5): A = A + 3 A", "range(6): A = A + 5 A = A + 1 for i", "= A + 7 for i in range(4): A = A + 7", "4 A = A + 2 A = A + 4 for i", "A + 3 A = A + 9 A = A + 6", "+ 3 A = A + 6 for i in range(5): A =", "range(7): A = A + 3 A = A + 9 A =", "4 A = A + 6 for i in range(1): A = A", "A = A + 4 A = A + 8 for i in", "+ 1 A = A + 7 A = A + 3 for", "range(6): A = A + 5 A = A + 3 A =", "4 for i in range(4): for i in range(9): A = A +", "i in range(4): for i in range(1): A = A + 7 for", "= A + 6 A = A + 2 for i in range(5):", "+ 6 A = A + 2 A = A + 2 A", "A + 6 for i in range(4): A = A + 
8 A", "+ 2 A = A + 4 A = A + 7 for", "range(7): for i in range(4): A = A + 6 A = A", "= A + 2 for i in range(5): A = A + 7", "= A + 9 A = A + 5 for i in range(3):", "2 A = A + 5 A = A + 2 A =", "+ 7 A = A + 7 A = A + 7 for", "6 for i in range(4): A = A + 9 A = A", "A + 8 A = A + 8 A = A + 1", "6 for i in range(1): A = A + 3 A = A", "i in range(2): A = A + 3 for i in range(8): A", "+ 5 for i in range(3): for i in range(5): A = A", "7 for i in range(5): A = A + 3 for i in", "= A + 6 for i in range(9): A = A + 6", "range(6): A = A + 5 A = A + 5 for i", "for i in range(3): A = A + 7 A = A +", "in range(7): for i in range(2): A = A + 3 A =", "+ 9 A = A + 2 A = A + 5 for", "= A + 2 for i in range(5): A = A + 4", "1 A = A + 3 A = A + 8 for i", "A = A + 2 for i in range(7): for i in range(4):", "1 for i in range(5): A = A + 1 for i in", "= 0 for i in range(2): A = A + 4 for i", "+ 3 for i in range(6): for i in range(2): A = A", "+ 9 for i in range(9): for i in range(2): A = A", "i in range(4): A = A + 1 A = A + 6", "9 A = A + 3 A = A + 2 for i", "i in range(6): A = A + 5 A = A + 3", "7 for i in range(9): for i in range(6): A = A +", "5 for i in range(9): for i in range(7): A = A +", "+ 7 A = A + 2 for i in range(5): A =", "+ 4 A = A + 6 for i in range(7): A =", "4 for i in range(8): A = A + 9 for i in", "1 A = A + 5 for i in range(9): A = A", "A + 7 for i in range(7): for i in range(4): A =", "2 for i in range(9): for i in range(5): A = A +", "A = A + 7 for i in range(5): for i in range(4):", "i in range(1): A = A + 7 for i in range(4): A", "range(8): A = A + 6 for i in range(4): A = A", "range(9): A = A + 3 A = A + 6 for i", "range(6): for i in range(1): A = A + 3 for i in", "+ 3 A = A + 1 A = A + 8 A", "+ 9 A = A + 3 for i in range(4): A =", "3 A = A + 5 for i in range(3): A = A", "A = 0 for i in range(2): A = A + 4 for", "i in range(7): A = A + 4 for i in range(6): A", "in range(9): A = A + 7 A = A + 8 for", "+ 3 A = A + 9 A = A + 6 A", "+ 8 for i in range(4): A = A + 7 
A =", "= A + 7 for i in range(9): A = A + 6", "4 A = A + 9 for i in range(1): A = A", "+ 1 A = A + 6 A = A + 5 A", "i in range(4): A = A + 2 for i in range(2): A", "A + 4 for i in range(6): A = A + 9 for", "range(3): A = A + 9 A = A + 1 for i", "in range(1): A = A + 6 for i in range(2): A =", "= A + 8 A = A + 5 A = A +", "7 for i in range(1): A = A + 7 for i in", "range(4): A = A + 6 A = A + 3 for i", "+ 1 A = A + 6 for i in range(5): A =", "for i in range(4): A = A + 9 A = A +", "8 A = A + 2 A = A + 5 for i", "+ 1 for i in range(6): A = A + 2 A =", "in range(2): A = A + 3 for i in range(1): A =", "in range(2): A = A + 4 for i in range(2): A =", "2 for i in range(4): A = A + 6 A = A", "range(1): for i in range(1): A = A + 1 for i in", "A = A + 2 A = A + 4 A = A", "= A + 4 A = A + 6 for i in range(5):", "range(4): A = A + 9 for i in range(5): A = A", "7 for i in range(8): for i in range(1): A = A +", "9 A = A + 7 for i in range(7): for i in", "2 for i in range(3): A = A + 4 A = A", "+ 1 for i in range(5): A = A + 6 for i", "A = A + 3 A = A + 2 for i in", "i in range(5): A = A + 4 for i in range(4): A", "A = A + 3 A = A + 8 for i in", "A + 2 A = A + 8 A = A + 4", "in range(3): A = A + 4 for i in range(7): A =", "in range(2): A = A + 3 for i in range(3): A =", "5 for i in range(9): A = A + 1 A = A", "8 A = A + 4 for i in range(3): for i in", "9 for i in range(5): A = A + 5 for i in", "A + 5 A = A + 2 A = A + 1", "A + 9 A = A + 5 for i in range(3): A", "range(9): A = A + 6 for i in range(1): for i in", "4 for i in range(6): A = A + 2 for i in", "range(8): A = A + 2 for i in range(8): for i in", "7 for i in range(6): A = A + 7 for i in", "+ 8 for i in range(2): A = A + 4 A =", "+ 5 A = A + 5 A = A + 4 for", "A + 4 for i in range(2): A = A + 3 A", "in range(4): A = A + 8 A = A + 7 A", "range(9): A = A + 4 A = A + 1 for i", "in range(3): for i in range(6): A = A + 8 A =", "for i in range(1): for i in range(7): for i in range(2): A", "A + 5 A = A + 8 A = A + 7", "i in 
range(1): A = A + 4 A = A + 4", "A = A + 2 A = A + 9 A = A", "A + 4 for i in range(4): A = A + 6 A", "+ 6 A = A + 4 A = A + 9 A", "+ 2 A = A + 9 A = A + 1 A", "5 for i in range(8): for i in range(6): A = A +", "A = A + 5 for i in range(2): A = A +", "i in range(5): A = A + 4 A = A + 8", "5 A = A + 3 A = A + 5 A =", "= A + 6 A = A + 2 A = A +", "for i in range(4): A = A + 8 A = A +", "i in range(1): A = A + 2 A = A + 8", "+ 2 for i in range(6): for i in range(1): A = A", "i in range(9): A = A + 2 A = A + 9", "+ 2 A = A + 8 A = A + 3 A", "+ 4 for i in range(2): A = A + 3 for i", "= A + 9 A = A + 4 for i in range(2):", "5 for i in range(4): A = A + 5 A = A", "A = A + 4 A = A + 7 A = A", "4 for i in range(3): A = A + 3 A = A", "i in range(1): A = A + 3 A = A + 3", "2 for i in range(8): for i in range(6): A = A +", "+ 6 for i in range(9): A = A + 3 A =", "A = A + 6 for i in range(3): A = A +", "2 A = A + 8 A = A + 2 A =", "= A + 1 A = A + 4 for i in range(6):", "A + 8 A = A + 9 A = A + 5", "+ 8 A = A + 3 A = A + 5 A", "in range(3): A = A + 9 for i in range(1): A =", "+ 7 for i in range(9): A = A + 8 for i", "A + 3 for i in range(1): A = A + 2 A", "in range(8): A = A + 5 for i in range(3): A =", "+ 8 A = A + 4 for i in range(8): for i", "A + 3 A = A + 9 A = A + 4", "range(3): A = A + 4 A = A + 4 for i", "in range(1): A = A + 6 A = A + 4 A", "+ 5 A = A + 6 for i in range(7): for i", "A + 9 A = A + 4 A = A + 7", "A = A + 3 A = A + 5 A = A", "+ 2 A = A + 2 A = A + 1 for", "for i in range(3): for i in range(5): A = A + 4", "= A + 3 for i in range(5): A = A + 9", "+ 1 A = A + 7 A = A + 4 for", "in range(9): A = A + 4 A = A + 3 for", "4 A = A + 7 for i in range(5): A = A", "A = A + 7 for i in range(5): A = A +", "A + 8 for i in range(5): A = A + 6 A", "+ 4 A = A + 2 for i in range(9): for i", "2 for i in range(6): A = A + 3 for i in", "6 A = A + 6 A = A + 4 for i", "range(4): A = A + 7 A = A + 1 for i", "7 for i in range(8): A = A + 2 for i in", "for i in range(6): A = A + 7 A 
= A +", "+ 1 A = A + 7 A = A + 7 A", "3 A = A + 9 A = A + 6 A =", "A + 6 A = A + 9 for i in range(1): A", "3 for i in range(9): A = A + 2 A = A", "for i in range(3): A = A + 9 A = A +", "i in range(3): for i in range(4): A = A + 9 for", "9 for i in range(5): A = A + 2 A = A", "A + 5 for i in range(1): for i in range(8): A =", "A + 5 A = A + 3 A = A + 5", "A + 1 A = A + 9 A = A + 5", "+ 5 A = A + 6 A = A + 2 for", "in range($1) A = 0 for i in range(2): A = A +", "+ 5 A = A + 5 for i in range(4): A =", "2 for i in range(1): A = A + 6 A = A", "+ 7 A = A + 7 A = A + 8 A", "in range(8): A = A + 6 for i in range(4): A =", "i in range(5): for i in range(1): A = A + 4 A", "+ 9 A = A + 1 for i in range(6): A =", "i in range(4): A = A + 2 A = A + 7", "i in range(5): A = A + 6 A = A + 5", "3 for i in range(8): A = A + 7 A = A", "A + 4 A = A + 4 for i in range(6): A", "A + 3 A = A + 6 for i in range(1): A", "7 A = A + 7 for i in range(7): A = A", "6 A = A + 5 for i in range(1): for i in", "i in range(1): for i in range(2): A = A + 4 A", "7 A = A + 8 A = A + 3 for i", "= A + 4 A = A + 4 for i in range(6):", "in range(6): A = A + 4 A = A + 9 A", "in range(2): A = A + 6 for i in range(6): A =", "+ 4 for i in range(9): A = A + 3 A =", "A + 7 A = A + 1 A = A + 5", "A + 4 for i in range(8): A = A + 9 A", "7 A = A + 5 A = A + 5 A =", "= A + 3 for i in range(4): A = A + 5", "= A + 7 for i in range(7): for i in range(7): for", "A + 1 A = A + 5 for i in range(9): A", "= A + 6 for i in range(3): A = A + 7", "i in range(9): A = A + 3 A = A + 6", "for i in range(6): A = A + 7 for i in range(7):", "1 for i in range(8): A = A + 4 A = A", "i in range(4): for i in range(4): A = A + 1 A", "= A + 2 for i in range(5): A = A + 8", "2 A = A + 1 A = A + 4 for i", "5 for i in range(4): A = A + 7 A = A" ]
[ "cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD licence, see LICENSE',", "author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD licence, see LICENSE', packages=['nuodbawsquickstart'], scripts=[\"bin/nuodb_aws_quickstart.py\"], zip_safe=True)", "to deploy a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])],", "a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\",", "and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD", "AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD licence, see", "from setuptools import setup import sys setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region", "url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', 
['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD licence, see LICENSE', packages=['nuodbawsquickstart'],", "version='1.1.0', description='Script to deploy a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>',", "author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD licence, see LICENSE', packages=['nuodbawsquickstart'], scripts=[\"bin/nuodb_aws_quickstart.py\"],", "setup import sys setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region and multi-instance AWS", "import sys setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region and multi-instance AWS cluster',", "sys setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart',", "import setup import sys setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region and multi-instance", "description='Script to deploy a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates',", "setuptools import setup import sys setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region and", "deploy a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\",", "multi-region and multi-instance AWS cluster', 
url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"],", "multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.', author_email='<EMAIL>', #data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])], install_requires=[\"argparse\", \"boto\", \"requests\"], license='BSD licence,", "setup(name='nuodbawsquickstart', version='1.1.0', description='Script to deploy a multi-region and multi-instance AWS cluster', url='http://github.com/nuodb/nuodb-aws-quickstart', author='<NAME>.'," ]
[]
[ "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "else request.scheme if request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next')", "= session.query(models.User).filter( models.User.username == username).first() if not authorized: if user: session.delete(user) session.commit() return", "response from OAuth client Returns ------- (bool, bool, bool) Return if 1. the", "from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY',", "resp.data def dict_get(self, dic, key): keys = key.split(\".\") value = dic for k", "License for the # specific language governing permissions and limitations # under the", "from airflow import models, configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin", "models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response", "self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None,", "and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return", "self.oauth.authorized_response() try: if resp is None: raise AuthenticationError( 'Null response from OAuth service,", "(ASF) under one # or more contributor license agreements. 
See the NOTICE file", "'None')) return resp.data def dict_get(self, dic, key): keys = key.split(\".\") value = dic", "userid or userid == 'None': return None user = session.query(models.User).filter( models.User.id == int(userid)).first()", "'0' and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param):", "class OAuthUser(models.User): def __init__(self, user): self.user = user @property def is_active(self): \"\"\"Required by", "email_key) authorized, superuser = self.authorize(resp, user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter(", "importlib import import_module import flask_login from flask import url_for, redirect, request # Need", "= flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app = None self.oauth =", "key): keys = key.split(\".\") value = dic for k in keys: value =", "software distributed under the License is distributed on an # \"AS IS\" BASIS,", "if self.user else False class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager =", "flask_login import current_user, logout_user, login_required, login_user from flask_oauthlib.client import OAuth from airflow import", "OAuth from airflow import models, configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import", "self.oauth_callback) def login(self, request): log.debug('Redirecting user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\", "= self.oauth.authorized_response() try: if resp is None: raise AuthenticationError( 'Null response from OAuth", "dic, key): keys = key.split(\".\") value = dic for k in keys: value", "is allowed to access airflow, 2. 
if the user is a superuser \"\"\"", "return self.user.superuser if self.user else False class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self):", "OAuthUser(models.User): def __init__(self, user): self.user = user @property def is_active(self): \"\"\"Required by flask_login\"\"\"", "get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or resp.status != 200: raise AuthenticationError( 'Failed", "return self.user.get_id() def data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\" return self.user.superuser if", "def data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\" return self.user.superuser if self.user else", "'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\ else None", "authorized, superuser = self.authorize(resp, user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username", "username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager", "airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl,", "def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app =", "response from OAuth client user_info: dict User information response from OAuth client Returns", "\"\"\" Parameters ---------- authorized_response Authorized response from OAuth client user_info: dict User information", "self.user = user @property def is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property def", "log = LoggingMixin().log def get_config_param(param): return 
str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param)", "to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO']", "provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0'", "next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp", "= key.split(\".\") value = dic for k in keys: value = value[k] return", "under the License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "self.user @property def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required", "additional information # regarding copyright ownership. The ASF licenses this file # to", "raise AuthenticationError( 'Null response from OAuth service, denying access.' ) access_token = resp['access_token']", "# \"License\"); you may not use this file except in compliance # with", "import flask_login from flask import url_for, redirect, request # Need to expose these", "resp else 'None')) return resp.data def dict_get(self, dic, key): keys = key.split(\".\") value", "Licensed to the Apache Software Foundation (ASF) under one # or more contributor", "access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user to OAuth", "or more contributor license agreements. 
See the NOTICE file # distributed with this", "noinspection PyUnresolvedReferences from flask_login import current_user, logout_user, login_required, login_user from flask_oauthlib.client import OAuth", "[ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def", "OR CONDITIONS OF ANY # KIND, either express or implied. See the License", "Foundation (ASF) under one # or more contributor license agreements. See the NOTICE", "Apache Software Foundation (ASF) under one # or more contributor license agreements. See", "configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user): self.user = user @property def is_active(self):", "\"\"\"Required by flask_login\"\"\" return self.user @property def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user", "has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user): self.user = user @property", "return None user = session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response,", "False class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view =", "response from OAuth service, denying access.' ) access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token)", "fetch user profile, status ({0})'.format( resp.status if resp else 'None')) return resp.data def", "in compliance # with the License. You may obtain a copy of the", "implied. 
See the License for the # specific language governing permissions and limitations", "url_for, redirect, request # Need to expose these downstream # flake8: noqa: F401", "OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized response from OAuth", "access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or resp.status !=", "or agreed to in writing, # software distributed under the License is distributed", "user): self.user = user @property def is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property", "'')) if not resp or resp.status != 200: raise AuthenticationError( 'Failed to fetch", "if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session", "resp.status != 200: raise AuthenticationError( 'Failed to fetch user profile, status ({0})'.format( resp.status", "def get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def", "status ({0})'.format( resp.status if resp else 'None')) return resp.data def dict_get(self, dic, key):", "is None: raise AuthenticationError( 'Null response from OAuth service, denying access.' 
) access_token", "None def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'),", "tools\"\"\" return self.user.superuser if self.user else False def is_superuser(self): \"\"\"Access all the things\"\"\"", "dict User information response from OAuth client Returns ------- (bool, bool, bool) Return", "flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\"", "license agreements. See the NOTICE file # distributed with this work for additional", "and limitations # under the License. from importlib import import_module import flask_login from", "\"License\"); you may not use this file except in compliance # with the", "pass class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message =", "raise AuthenticationError( 'Failed to fetch user profile, status ({0})'.format( resp.status if resp else", "if 1. the user is allowed to access airflow, 2. 
if the user", "flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope':", "resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info,", "flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app = None self.oauth = None", "either express or implied. See the License for the # specific language governing", "redirect, request # Need to expose these downstream # flake8: noqa: F401 #", "user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first() if not", "the things\"\"\" return self.user.superuser if self.user else False class AuthenticationError(Exception): pass class OAuthBackend(object):", "permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session def oauth_callback(self,", "return redirect(url_for('airflow.noaccess')) if not user: user = models.User( username=username, email=email, superuser=superuser) user.superuser =", "not use this file except in compliance # with the License. 
You may", "airflow import models, configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import", "self.user else False def is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser if self.user", "airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '')", "def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def get_id(self): \"\"\"Returns the current user", "= self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "# or more contributor license agreements. See the NOTICE file # distributed with", "# under the License. from importlib import import_module import flask_login from flask import", "import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context", "get_id(self): \"\"\"Returns the current user id as required by flask_login\"\"\" return self.user.get_id() def", "as required by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access to data profiling", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the", "client Returns ------- (bool, bool, bool) Return if 1. 
the user is allowed", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "self.user.get_id() def data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\" return self.user.superuser if self.user", "= superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager = OAuthBackend() def login(self,", "flask import url_for, redirect, request # Need to expose these downstream # flake8:", "int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized response", "# regarding copyright ownership. The ASF licenses this file # to you under", "None self.flask_app = None self.oauth = None self.api_rev = None def init_app(self, flask_app):", "in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\ else None return", "session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user: user = models.User( username=username, email=email, superuser=superuser)", "more contributor license agreements. 
See the NOTICE file # distributed with this work", "by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\" return", "the user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth,", "configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if", "_scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self, access_token): resp = self.oauth.get(", "import url_for, redirect, request # Need to expose these downstream # flake8: noqa:", "request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\ else None return self.oauth.authorize(callback=url_for(", "get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp,", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "key.split(\".\") value = dic for k in keys: value = value[k] return value", "from OAuth service, denying access.' 
) access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key", "has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session def", "user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager = OAuthBackend() def", "self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user =", "Parameters ---------- authorized_response Authorized response from OAuth client user_info: dict User information response", "resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or resp.status != 200:", "LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, '_create_unverified_context', None)):", "request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme,", "import_module import flask_login from flask import url_for, redirect, request # Need to expose", "downstream # flake8: noqa: F401 # noinspection PyUnresolvedReferences from flask_login import current_user, logout_user,", "def is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_authenticated(self): \"\"\"Required by flask_login\"\"\"", "init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'),", "information response from OAuth client Returns ------- (bool, 
bool, bool) Return if 1.", "CONDITIONS OF ANY # KIND, either express or implied. See the License for", "return self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def get_id(self): \"\"\"Returns", "else 'None')) return resp.data def dict_get(self, dic, key): keys = key.split(\".\") value =", "\"\"\"Provides access to data profiling tools\"\"\" return self.user.superuser if self.user else False def", "work for additional information # regarding copyright ownership. The ASF licenses this file", "else False class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view", "def is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser if self.user else False class", "else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None) def", "self.oauth = None self.api_rev = None def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app)", "import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, '_create_unverified_context',", "None user = session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info):", "'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url'))", "licenses this file # to you under the Apache License, Version 2.0 (the", "denying access.' 
) access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key", "login(self, request): log.debug('Redirecting user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO'", "== int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized", "@provide_session def load_user(self, userid, session=None): if not userid or userid == 'None': return", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "# # Licensed to the Apache Software Foundation (ASF) under one # or", "None self.api_rev = None def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth =", "express or implied. See the License for the # specific language governing permissions", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "flask_oauthlib.client import OAuth from airflow import models, configuration from airflow.utils.db import provide_session from", "not resp or resp.status != 200: raise AuthenticationError( 'Failed to fetch user profile,", "flask_login from flask import url_for, redirect, request # Need to expose these downstream", "you under the Apache License, Version 2.0 (the # \"License\"); you may not", "= import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session def oauth_callback(self, session=None):", "return self.user @property def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_anonymous(self):", "License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "email = self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info) except 
AuthenticationError: return redirect(url_for('airflow.noaccess'))", "language governing permissions and limitations # under the License. from importlib import import_module", "value[k] return value @provide_session def load_user(self, userid, session=None): if not userid or userid", "client user_info: dict User information response from OAuth client Returns ------- (bool, bool,", "def load_user(self, userid, session=None): if not userid or userid == 'None': return None", "\"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True", "login_user from flask_oauthlib.client import OAuth from airflow import models, configuration from airflow.utils.db import", "access airflow, 2. if the user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend", "flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\" return self.user.superuser", "scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme", "= dic for k in keys: value = value[k] return value @provide_session def", "self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user to OAuth login') scheme", "self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email =", "username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email = self.dict_get(user_info,", "of the License 
at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "under the Apache License, Version 2.0 (the # \"License\"); you may not use", "if not resp or resp.status != 200: raise AuthenticationError( 'Failed to fetch user", "License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "except AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first() if not authorized:", "None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self,", "or implied. See the License for the # specific language governing permissions and", "OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'),", "'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self, access_token): resp =", "session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters ----------", "email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager =", "distributed under the License is distributed on an # \"AS IS\" BASIS, WITHOUT", "@property def is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_authenticated(self): \"\"\"Required by", "os, ssl if 
(os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context =", "login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else", "(bool, bool, bool) Return if 1. the user is allowed to access airflow,", "-*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF)", "permissions and limitations # under the License. from importlib import import_module import flask_login", "200: raise AuthenticationError( 'Failed to fetch user profile, status ({0})'.format( resp.status if resp", "get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or resp.status", "airflow, 2. if the user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend =", "current_user, logout_user, login_required, login_user from flask_oauthlib.client import OAuth from airflow import models, configuration", "value = value[k] return value @provide_session def load_user(self, userid, session=None): if not userid", "flask_login\"\"\" return False def get_id(self): \"\"\"Returns the current user id as required by", "# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation", "if not user: user = models.User( username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user)", "return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user): self.user = user @property def", "permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called')", "return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first() if not authorized: if user:", 
"authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user: user = models.User(", "Unless required by applicable law or agreed to in writing, # software distributed", "F401 # noinspection PyUnresolvedReferences from flask_login import current_user, logout_user, login_required, login_user from flask_oauthlib.client", "data profiling tools\"\"\" return self.user.superuser if self.user else False def is_superuser(self): \"\"\"Access all", "distributed with this work for additional information # regarding copyright ownership. The ASF", "if resp else 'None')) return resp.data def dict_get(self, dic, key): keys = key.split(\".\")", "= ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param): return", "= user @property def is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_authenticated(self):", "return True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url = request.args.get('state')", "service, denying access.' ) access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\")", "login_required, login_user from flask_oauthlib.client import OAuth from airflow import models, configuration from airflow.utils.db", "class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None", "regarding copyright ownership. The ASF licenses this file # to you under the", "return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized response from", "# KIND, either express or implied. 
See the License for the # specific", "superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True,", "models, configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl", "this work for additional information # regarding copyright ownership. The ASF licenses this", "ANY # KIND, either express or implied. See the License for the #", "specific language governing permissions and limitations # under the License. from importlib import", "contributor license agreements. See the NOTICE file # distributed with this work for", "if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\ else", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import models, configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os,", "= LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class", "these downstream # flake8: noqa: F401 # noinspection PyUnresolvedReferences from flask_login import current_user,", "username_key) email = self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info) except AuthenticationError: return", "id as required by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access to data", "import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth", "\"\"\"Required 
by flask_login\"\"\" return False def get_id(self): \"\"\"Returns the current user id as", "(os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log =", "keys = key.split(\".\") value = dic for k in keys: value = value[k]", "import current_user, logout_user, login_required, login_user from flask_oauthlib.client import OAuth from airflow import models,", "\\ else request.scheme if request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True),", "if request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer", "See the License for the # specific language governing permissions and limitations #", "from flask_oauthlib.client import OAuth from airflow import models, configuration from airflow.utils.db import provide_session", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "\"\"\"Returns the current user id as required by flask_login\"\"\" return self.user.get_id() def data_profiling(self):", "authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user to OAuth login')", "2.0 (the # \"License\"); you may not use this file except in compliance", "the user is allowed to access airflow, 2. if the user is a", "= self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info) except", "KIND, either express or implied. 
See the License for the # specific language", "def has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user): self.user = user", "load_user(self, userid, session=None): if not userid or userid == 'None': return None user", "not userid or userid == 'None': return None user = session.query(models.User).filter( models.User.id ==", "User information response from OAuth client Returns ------- (bool, bool, bool) Return if", "== username).first() if not authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "def dict_get(self, dic, key): keys = key.split(\".\") value = dic for k in", "user_info: dict User information response from OAuth client Returns ------- (bool, bool, bool)", "AuthenticationError( 'Null response from OAuth service, denying access.' ) access_token = resp['access_token'] user_info", "flask_login\"\"\" return self.user @property def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property def", "return self.user.superuser if self.user else False def is_superuser(self): \"\"\"Access all the things\"\"\" return", "compliance # with the License. You may obtain a copy of the License", "request): log.debug('Redirecting user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in", "self.flask_app = None self.oauth = None self.api_rev = None def init_app(self, flask_app): self.flask_app", "= request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See", "with the License. 
You may obtain a copy of the License at #", "current user id as required by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access", "information # regarding copyright ownership. The ASF licenses this file # to you", "coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under", "self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user to OAuth login') scheme =", "@provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url = request.args.get('state') or url_for('admin.index') if", "required by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\"", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "one # or more contributor license agreements. See the NOTICE file # distributed", "Need to expose these downstream # flake8: noqa: F401 # noinspection PyUnresolvedReferences from", "except in compliance # with the License. 
You may obtain a copy of", "base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback',", "request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "you may not use this file except in compliance # with the License.", "request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response()", "def authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized response from OAuth client", "email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized, superuser", "ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth',", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "try: if resp is None: raise AuthenticationError( 'Null response from OAuth service, denying", "profiling tools\"\"\" return self.user.superuser if self.user else False def is_superuser(self): \"\"\"Access all the", "token=(access_token, '')) if not resp or resp.status != 200: raise AuthenticationError( 'Failed to", "None self.oauth = None self.api_rev = None def init_app(self, flask_app): 
self.flask_app = flask_app", "from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and", "consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'),", "True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url = request.args.get('state') or url_for('admin.index')", "flake8: noqa: F401 # noinspection PyUnresolvedReferences from flask_login import current_user, logout_user, login_required, login_user", "authorized_response, user_info) return True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url", "oauth_callback(self, session=None): log.debug('OAuth callback called') next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in", "this file # to you under the Apache License, Version 2.0 (the #", "is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info)", "called') next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url: next_url = url_for('admin.index')", "# # Unless required by applicable law or agreed to in writing, #", "return value @provide_session def load_user(self, userid, session=None): if not userid or userid ==", "__init__(self, user): self.user = user @property def is_active(self): \"\"\"Required by flask_login\"\"\" 
return self.user", "user = session.query(models.User).filter( models.User.username == username).first() if not authorized: if user: session.delete(user) session.commit()", "dict_get(self, dic, key): keys = key.split(\".\") value = dic for k in keys:", "AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first() if not authorized: if", "is '0' and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def", "to fetch user profile, status ({0})'.format( resp.status if resp else 'None')) return resp.data", "Version 2.0 (the # \"License\"); you may not use this file except in", "for the # specific language governing permissions and limitations # under the License.", "or url_for('admin.index') if get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response() try:", "url_for('admin.index') if get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response() try: if", "resp or resp.status != 200: raise AuthenticationError( 'Failed to fetch user profile, status", "authorized_response Authorized response from OAuth client user_info: dict User information response from OAuth", "all the things\"\"\" return self.user.superuser if self.user else False class AuthenticationError(Exception): pass class", "= resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username =", "OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\", "things\"\"\" return self.user.superuser if self.user else False class AuthenticationError(Exception): pass class OAuthBackend(object): def", "superuser 
session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager = OAuthBackend() def login(self, request):", "\\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\", "OF ANY # KIND, either express or implied. See the License for the", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied.", "to expose these downstream # flake8: noqa: F401 # noinspection PyUnresolvedReferences from flask_login", "False def is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser if self.user else False", "or None) def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not", "user = models.User( username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit()", "access to data profiling tools\"\"\" return self.user.superuser if self.user else False def is_superuser(self):", "self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app = None self.oauth", "authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized response from OAuth client user_info: dict", "callback called') next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url: next_url =", "request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user", 
"access.' ) access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key =", "user_info): \"\"\" Parameters ---------- authorized_response Authorized response from OAuth client user_info: dict User", "= self.authorize(resp, user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first()", "by flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def", "Return if 1. the user is allowed to access airflow, 2. if the", "if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user: user = models.User( username=username,", "to access airflow, 2. if the user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"):", "License, Version 2.0 (the # \"License\"); you may not use this file except", "= None self.flask_app = None self.oauth = None self.api_rev = None def init_app(self,", "userid, session=None): if not userid or userid == 'None': return None user =", "this file except in compliance # with the License. You may obtain a", "the License. from importlib import import_module import flask_login from flask import url_for, redirect,", "= self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or resp.status != 200: raise", "'_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param))", "from OAuth client user_info: dict User information response from OAuth client Returns -------", "may not use this file except in compliance # with the License. 
You", "if the user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return", "@property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def get_id(self): \"\"\"Returns the current", "self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or resp.status != 200: raise AuthenticationError(", "user: user = models.User( username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user))", "ASF licenses this file # to you under the Apache License, Version 2.0", "!= 200: raise AuthenticationError( 'Failed to fetch user profile, status ({0})'.format( resp.status if", "url_for('admin.index') resp = self.oauth.authorized_response() try: if resp is None: raise AuthenticationError( 'Null response", "AuthenticationError( 'Failed to fetch user profile, status ({0})'.format( resp.status if resp else 'None'))", "\"\"\"Access all the things\"\"\" return self.user.superuser if self.user else False class AuthenticationError(Exception): pass", "under the License. from importlib import import_module import flask_login from flask import url_for,", "resp is None: raise AuthenticationError( 'Null response from OAuth service, denying access.' )", "request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme", "# distributed with this work for additional information # regarding copyright ownership. The", "# flake8: noqa: F401 # noinspection PyUnresolvedReferences from flask_login import current_user, logout_user, login_required,", "2. 
if the user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\"))", "by flask_login\"\"\" return self.user @property def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property", "and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback',", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "1. the user is allowed to access airflow, 2. if the user is", "with this work for additional information # regarding copyright ownership. The ASF licenses", "def get_id(self): \"\"\"Returns the current user id as required by flask_login\"\"\" return self.user.get_id()", "the License. You may obtain a copy of the License at # #", "= self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email", "user id as required by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides access to", "agreements. 
See the NOTICE file # distributed with this work for additional information", "return False def get_id(self): \"\"\"Returns the current user id as required by flask_login\"\"\"", "next_url = url_for('admin.index') resp = self.oauth.authorized_response() try: if resp is None: raise AuthenticationError(", "user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key)", "user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user: user = models.User( username=username, email=email,", "writing, # software distributed under the License is distributed on an # \"AS", "str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user): self.user", "return permission_backend.authorize(self.oauth, authorized_response, user_info) return True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback", "models.User.username == username).first() if not authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if", "self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self, access_token): resp", "_external=True), state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"),", "NOTICE file # distributed with this work for additional information # regarding copyright", "= 'airflow.login' self.login_manager.login_message = None self.flask_app = None self.oauth = None self.api_rev =", "is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_authenticated(self): 
\"\"\"Required by flask_login\"\"\" return", "if not userid or userid == 'None': return None user = session.query(models.User).filter( models.User.id", "'airflow.login' self.login_manager.login_message = None self.flask_app = None self.oauth = None self.api_rev = None", "utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one", "'None': return None user = session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self,", "\\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None)", "= get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key)", "self.user else False class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager()", "@property def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required by", "param) class OAuthUser(models.User): def __init__(self, user): self.user = user @property def is_active(self): \"\"\"Required", "AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message", "\"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request):", "'') is '0' and getattr(ssl, '_create_unverified_context', None)): 
ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log", "= None self.api_rev = None def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth", "request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback)", "or userid == 'None': return None user = session.query(models.User).filter( models.User.id == int(userid)).first() return", "def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'),", "= session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\" Parameters", "user_info) return True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url =", "True, True @provide_session def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url = request.args.get('state') or", "Authorized response from OAuth client user_info: dict User information response from OAuth client", "value = dic for k in keys: value = value[k] return value @provide_session", "the Apache License, Version 2.0 (the # \"License\"); you may not use this", "\"\"\"Required by flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False", "superuser = self.authorize(resp, user_info) except AuthenticationError: return 
redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username ==", "session=None): if not userid or userid == 'None': return None user = session.query(models.User).filter(", "None)): ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param)) def", "request.referrer or None) def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if", "if not authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user: user", "is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def get_id(self): \"\"\"Returns the current user id", "next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response() try: if resp is None: raise", "# Need to expose these downstream # flake8: noqa: F401 # noinspection PyUnresolvedReferences", "ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param):", "not user: user = models.User( username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit()", "keys: value = value[k] return value @provide_session def load_user(self, userid, session=None): if not", ") access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\")", "The ASF licenses this file # to you under the Apache License, Version", "file except in compliance # with the License. 
You may obtain a copy", "file # to you under the Apache License, Version 2.0 (the # \"License\");", "if get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response() try: if resp", "in keys: value = value[k] return value @provide_session def load_user(self, userid, session=None): if", "OAuth client user_info: dict User information response from OAuth client Returns ------- (bool,", "by flask_login\"\"\" return False def get_id(self): \"\"\"Returns the current user id as required", "for k in keys: value = value[k] return value @provide_session def load_user(self, userid,", "models.User( username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url)", "k in keys: value = value[k] return value @provide_session def load_user(self, userid, session=None):", "session=None): log.debug('OAuth callback called') next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url:", "(the # \"License\"); you may not use this file except in compliance #", "limitations # under the License. from importlib import import_module import flask_login from flask", "'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO']", "-*- # # Licensed to the Apache Software Foundation (ASF) under one #", "from OAuth client Returns ------- (bool, bool, bool) Return if 1. 
the user", "user profile, status ({0})'.format( resp.status if resp else 'None')) return resp.data def dict_get(self,", "def oauth_callback(self, session=None): log.debug('OAuth callback called') next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url')", "else False def is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser if self.user else", "the current user id as required by flask_login\"\"\" return self.user.get_id() def data_profiling(self): \"\"\"Provides", "request.scheme if request.scheme \\ else None return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or", "is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser if self.user else False class AuthenticationError(Exception):", "law or agreed to in writing, # software distributed under the License is", "param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user): self.user =", "OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app", "# software distributed under the License is distributed on an # \"AS IS\"", "to you under the Apache License, Version 2.0 (the # \"License\"); you may", "username).first() if not authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user:", "= flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\",", "file # distributed with this work for additional information # regarding copyright ownership.", "user is 
allowed to access airflow, 2. if the user is a superuser", "consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user)", "# Licensed to the Apache Software Foundation (ASF) under one # or more", "username = self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info)", "self.login_manager.login_message = None self.flask_app = None self.oauth = None self.api_rev = None def", "PyUnresolvedReferences from flask_login import current_user, logout_user, login_required, login_user from flask_oauthlib.client import OAuth from", "authorize(self, authorized_response, user_info): \"\"\" Parameters ---------- authorized_response Authorized response from OAuth client user_info:", "= models.User( username=username, email=email, superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return", "copyright ownership. The ASF licenses this file # to you under the Apache", "ownership. The ASF licenses this file # to you under the Apache License,", "None: raise AuthenticationError( 'Null response from OAuth service, denying access.' ) access_token =", "License. 
from importlib import import_module import flask_login from flask import url_for, redirect, request", "session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager = OAuthBackend() def login(self, request): return login_manager.login(request)", "to data profiling tools\"\"\" return self.user.superuser if self.user else False def is_superuser(self): \"\"\"Access", "None) def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "or resp.status != 200: raise AuthenticationError( 'Failed to fetch user profile, status ({0})'.format(", "session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager = OAuthBackend() def login(self, request): return", "# Unless required by applicable law or agreed to in writing, # software", "return str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User): def __init__(self, user):", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "return resp.data def dict_get(self, dic, key): keys = key.split(\".\") value = dic for", "in next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response() try: if resp is None:", "= get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized, superuser =", "def __init__(self, user): self.user = user @property def is_active(self): \"\"\"Required by flask_login\"\"\" return", "resp = self.oauth.authorized_response() try: if resp is None: raise AuthenticationError( 'Null response from", "to in writing, # software distributed under the License is distributed on an", "agreed to in 
writing, # software distributed under the License is distributed on", "a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response, user_info) return", "def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, '')) if not resp or", "from importlib import import_module import flask_login from flask import url_for, redirect, request #", "self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "to the Apache Software Foundation (ASF) under one # or more contributor license", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "import provide_session from airflow.utils.log.logging_mixin import LoggingMixin import os, ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is", "Returns ------- (bool, bool, bool) Return if 1. the user is allowed to", "self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]},", "'Null response from OAuth service, denying access.' 
) access_token = resp['access_token'] user_info =", "superuser=superuser) user.superuser = superuser session.merge(user) session.commit() login_user(OAuthUser(user)) session.commit() return redirect(next_url) login_manager = OAuthBackend()", "self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def get_id(self): \"\"\"Returns the", "from flask import url_for, redirect, request # Need to expose these downstream #", "self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app = None self.oauth = None self.api_rev", "if resp is None: raise AuthenticationError( 'Null response from OAuth service, denying access.'", "------- (bool, bool, bool) Return if 1. the user is allowed to access", "use this file except in compliance # with the License. You may obtain", "({0})'.format( resp.status if resp else 'None')) return resp.data def dict_get(self, dic, key): keys", "the # specific language governing permissions and limitations # under the License. from", "---------- authorized_response Authorized response from OAuth client user_info: dict User information response from", "Software Foundation (ASF) under one # or more contributor license agreements. See the", "== 'None': return None user = session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def", "get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username = self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized,", "= None self.oauth = None self.api_rev = None def init_app(self, flask_app): self.flask_app =", "value @provide_session def load_user(self, userid, session=None): if not userid or userid == 'None':", "the License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "bool, bool) Return if 1. 
the user is allowed to access airflow, 2.", "self.api_rev = None def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app(", "redirect(url_for('airflow.noaccess')) if not user: user = models.User( username=username, email=email, superuser=superuser) user.superuser = superuser", "expose these downstream # flake8: noqa: F401 # noinspection PyUnresolvedReferences from flask_login import", "log.debug('Redirecting user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ", "resp.status if resp else 'None')) return resp.data def dict_get(self, dic, key): keys =", "LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class OAuthUser(models.User):", "See the NOTICE file # distributed with this work for additional information #", "state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token,", "user = session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user) def authorize(self, authorized_response, user_info): \"\"\"", "access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting user to", "self.dict_get(user_info, username_key) email = self.dict_get(user_info, email_key) authorized, superuser = self.authorize(resp, user_info) except AuthenticationError:", "dic for k in keys: value = value[k] return value @provide_session def 
load_user(self,", "the NOTICE file # distributed with this work for additional information # regarding", "from flask_login import current_user, logout_user, login_required, login_user from flask_oauthlib.client import OAuth from airflow", "in writing, # software distributed under the License is distributed on an #", "the Apache Software Foundation (ASF) under one # or more contributor license agreements.", "OAuth client Returns ------- (bool, bool, bool) Return if 1. the user is", "governing permissions and limitations # under the License. from importlib import import_module import", "= None def init_app(self, flask_app): self.flask_app = flask_app self.login_manager.init_app(self.flask_app) self.oauth = OAuth(self.flask_app).remote_app( 'oauth',", "def login(self, request): log.debug('Redirecting user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if", "bool) Return if 1. the user is allowed to access airflow, 2. if", "user is a superuser \"\"\" if has_config_param(\"oauth_permission_backend\"): permission_backend = import_module(get_config_param(\"oauth_permission_backend\")) return permission_backend.authorize(self.oauth, authorized_response,", "]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self, request): log.debug('Redirecting", "logout_user, login_required, login_user from flask_oauthlib.client import OAuth from airflow import models, configuration from", "session.query(models.User).filter( models.User.username == username).first() if not authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess'))", "ssl if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, 
'_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context", "if (os.environ.get('PYTHONHTTPSVERIFY', '') is '0' and getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log", "session.commit() return redirect(url_for('airflow.noaccess')) if not user: user = models.User( username=username, email=email, superuser=superuser) user.superuser", "flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return False def get_id(self):", "# noinspection PyUnresolvedReferences from flask_login import current_user, logout_user, login_required, login_user from flask_oauthlib.client import", "# with the License. You may obtain a copy of the License at", "= value[k] return value @provide_session def load_user(self, userid, session=None): if not userid or", "OAuth service, denying access.' ) access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key =", "get_config_param('base_url') in next_url: next_url = url_for('admin.index') resp = self.oauth.authorized_response() try: if resp is", "not authorized: if user: session.delete(user) session.commit() return redirect(url_for('airflow.noaccess')) if not user: user =", "data_profiling(self): \"\"\"Provides access to data profiling tools\"\"\" return self.user.superuser if self.user else False", "\"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'), access_token_url=get_config_param('access_token_url'), authorize_url=get_config_param('authorize_url')) self.login_manager.user_loader(self.load_user) self.flask_app.add_url_rule(get_config_param('oauth_callback_route'), 'oauth_callback', self.oauth_callback) def login(self,", "user @property def is_active(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_authenticated(self): \"\"\"Required", 
"noqa: F401 # noinspection PyUnresolvedReferences from flask_login import current_user, logout_user, login_required, login_user from", "Apache License, Version 2.0 (the # \"License\"); you may not use this file", "log.debug('OAuth callback called') next_url = request.args.get('state') or url_for('admin.index') if get_config_param('base_url') in next_url: next_url", "self.user.superuser if self.user else False def is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser", "class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login'", "user to OAuth login') scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and", "= url_for('admin.index') resp = self.oauth.authorized_response() try: if resp is None: raise AuthenticationError( 'Null", "import OAuth from airflow import models, configuration from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin", "__init__(self): self.login_manager = flask_login.LoginManager() self.login_manager.login_view = 'airflow.login' self.login_manager.login_message = None self.flask_app = None", "under one # or more contributor license agreements. 
See the NOTICE file #", "# to you under the Apache License, Version 2.0 (the # \"License\"); you", "required by applicable law or agreed to in writing, # software distributed under", "= OAuth(self.flask_app).remote_app( 'oauth', consumer_key=get_config_param('client_id'), consumer_secret=get_config_param('client_secret'), base_url=get_config_param('base_url'), request_token_params={'scope': [ \"user:info\", \"user:check-access\" ]}, request_token_url=None, access_token_method=get_config_param('access_token_method'),", "getattr(ssl, '_create_unverified_context', None)): ssl._create_default_https_context = ssl._create_unverified_context log = LoggingMixin().log def get_config_param(param): return str(configuration.conf.get('oauth',", "userid == 'None': return None user = session.query(models.User).filter( models.User.id == int(userid)).first() return OAuthUser(user)", "access_token = resp['access_token'] user_info = self.get_user_profile_info(access_token) username_key = get_config_param(\"username_key\") email_key = get_config_param(\"email_key\") username", "= request.environ['HTTP_X_FORWARDED_PROTO'] \\ if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \\ else request.scheme if", "# specific language governing permissions and limitations # under the License. from importlib", "False def get_id(self): \"\"\"Returns the current user id as required by flask_login\"\"\" return", "by applicable law or agreed to in writing, # software distributed under the", "self.user.superuser if self.user else False class AuthenticationError(Exception): pass class OAuthBackend(object): def __init__(self): self.login_manager", "for additional information # regarding copyright ownership. 
The ASF licenses this file #", "redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first() if not authorized: if user: session.delete(user)", "if self.user else False def is_superuser(self): \"\"\"Access all the things\"\"\" return self.user.superuser if", "return self.oauth.authorize(callback=url_for( 'oauth_callback', _scheme=scheme, _external=True), state=request.args.get('next') or request.referrer or None) def get_user_profile_info(self, access_token):", "or request.referrer or None) def get_user_profile_info(self, access_token): resp = self.oauth.get( get_config_param(\"user_info_url\"), token=(access_token, ''))", "is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\" return", "allowed to access airflow, 2. if the user is a superuser \"\"\" if", "the License for the # specific language governing permissions and limitations # under", "import import_module import flask_login from flask import url_for, redirect, request # Need to", "self.authorize(resp, user_info) except AuthenticationError: return redirect(url_for('airflow.noaccess')) user = session.query(models.User).filter( models.User.username == username).first() if", "applicable law or agreed to in writing, # software distributed under the License", "request # Need to expose these downstream # flake8: noqa: F401 # noinspection", "def is_authenticated(self): \"\"\"Required by flask_login\"\"\" return self.user @property def is_anonymous(self): \"\"\"Required by flask_login\"\"\"", "profile, status ({0})'.format( resp.status if resp else 'None')) return resp.data def dict_get(self, dic,", "'Failed to fetch user profile, status ({0})'.format( resp.status if resp else 'None')) return", "get_config_param(param): return str(configuration.conf.get('oauth', param)) def has_config_param(param): return configuration.conf.has_option('oauth', param) class 
OAuthUser(models.User): def __init__(self," ]
[ "'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh = fh_type( file_name", "config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh", "from logging.handlers import RotatingFileHandler from typing import Type from src.framework import get_config, make_config_files,", "ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh = fh_type( file_name + '.txt', mode='w', encoding='utf-8',", "= RotatingFileHandler fh = fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), #", "getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler", "range(256)]) # optional teardown # ... finally: # required teardown log.removeHandler(fh) fh.flush() fh.close()", "else: fh_type = RotatingFileHandler fh = fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging',", "log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x, ) for x in range(256)]) #", "# setup section make_config_files() config = get_config() formatter = Formatter(config['logging']['format'] % ()) log", "src.framework import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__ ==", "fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint:", "if __name__ == '__main__': # setup section make_config_files() config = get_config() formatter =", "pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level'))", "# type: ignore # pylint: 
disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint:", "disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter)", "ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member )", "typing import Type from src.framework import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import", "'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type", "x in range(256)]) # optional teardown # ... finally: # required teardown log.removeHandler(fh)", "# type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup", "for x in range(256)]) # optional teardown # ... 
finally: # required teardown", "mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), #", "= getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] =", "= fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore #", "logging import getLogger, Formatter from logging.handlers import RotatingFileHandler from typing import Type from", "fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh = fh_type( file_name +", "type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done", "section make_config_files() config = get_config() formatter = Formatter(config['logging']['format'] % ()) log = getLogger()", "getLogger, Formatter from logging.handlers import RotatingFileHandler from typing import Type from src.framework import", "= Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if", "formatter = Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name']", "file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type =", "backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh)", "__name__ == '__main__': # setup section make_config_files() config = get_config() formatter = 
Formatter(config['logging']['format']", "[(x, ) for x in range(256)]) # optional teardown # ... finally: #", "fh_type = RotatingFileHandler fh = fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'),", "pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x,", "file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member", "Formatter from logging.handlers import RotatingFileHandler from typing import Type from src.framework import get_config,", "get_config() formatter = Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name =", "type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member", "from typing import Type from src.framework import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs", "from logging import getLogger, Formatter from logging.handlers import RotatingFileHandler from typing import Type", "'max_file_size'), # type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore #", "% ()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'):", "import getLogger, Formatter from logging.handlers import RotatingFileHandler from typing import Type from src.framework", "RotatingFileHandler from typing import Type from src.framework import get_config, make_config_files, run_jobs, _sleeper from", "== '__main__': # setup section make_config_files() config = get_config() formatter = Formatter(config['logging']['format'] %", "in range(256)]) # optional teardown # ... 
finally: # required teardown log.removeHandler(fh) fh.flush()", "ZippedRotatingFileHandler if __name__ == '__main__': # setup section make_config_files() config = get_config() formatter", "setup done try: run_jobs(_sleeper, [(x, ) for x in range(256)]) # optional teardown", "= get_config() formatter = Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name", "'backup_count'), # type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) #", "from src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__': # setup section make_config_files() config", ") for x in range(256)]) # optional teardown # ... finally: # required", "# pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper,", "fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x, ) for x", "# setup done try: run_jobs(_sleeper, [(x, ) for x in range(256)]) # optional", "make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__': # setup", "from src.framework import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__", "src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__': # setup section make_config_files() config =", "done try: run_jobs(_sleeper, [(x, ) for x in range(256)]) # optional teardown #", "+ '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging',", "encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 
'backup_count'), # type:", "Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging',", "_sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__': # setup section make_config_files()", "run_jobs(_sleeper, [(x, ) for x in range(256)]) # optional teardown # ... finally:", "import Type from src.framework import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler", "logging.handlers import RotatingFileHandler from typing import Type from src.framework import get_config, make_config_files, run_jobs,", "log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler]", "make_config_files() config = get_config() formatter = Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging',", "'__main__': # setup section make_config_files() config = get_config() formatter = Formatter(config['logging']['format'] % ())", ") fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x, ) for", "()) log = getLogger() log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type:", "fh = fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore", "maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore", "# pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging',", "Type 
from src.framework import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler if", "RotatingFileHandler fh = fh_type( file_name + '.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type:", "setup section make_config_files() config = get_config() formatter = Formatter(config['logging']['format'] % ()) log =", "import get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__':", "'.txt', mode='w', encoding='utf-8', maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member backupCount=config.getfilesize('logging', 'backup_count'),", "config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh = fh_type(", "try: run_jobs(_sleeper, [(x, ) for x in range(256)]) # optional teardown # ...", "get_config, make_config_files, run_jobs, _sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__': #", "'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x, ) for x in", "ignore # pylint: disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try:", "config = get_config() formatter = Formatter(config['logging']['format'] % ()) log = getLogger() log.setLevel(config.getint('logging', 'level'))", "if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh =", "import ZippedRotatingFileHandler if __name__ == '__main__': # setup section make_config_files() config = get_config()", "= config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler", "run_jobs, 
_sleeper from src.zipped_logs import ZippedRotatingFileHandler if __name__ == '__main__': # setup section", "log.setLevel(config.getint('logging', 'level')) file_name = config['logging']['file_name'] if config.getboolean('logging', 'auto_zip'): fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler else:", "import RotatingFileHandler from typing import Type from src.framework import get_config, make_config_files, run_jobs, _sleeper", "disable=no-member ) fh.setLevel(config.getint('logging', 'level')) fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x, )", "= ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh = fh_type( file_name + '.txt', mode='w',", "fh.setFormatter(formatter) log.addHandler(fh) # setup done try: run_jobs(_sleeper, [(x, ) for x in range(256)])", "Type[RotatingFileHandler] = ZippedRotatingFileHandler else: fh_type = RotatingFileHandler fh = fh_type( file_name + '.txt'," ]
[ "= torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation ==", "== 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None:", "# conv block of inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1,", "self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) # reconstruction layer self.reconstruction_layer =", "super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm", "w self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x):", "ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats,", "parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True,", "stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size)", "bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm", "self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation", "return out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats", ") # conv block of inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1,", "super(DRCN, self).__init__() n_colors = args.n_colors n_feats = args.n_feats num_recursions = 16 self.num_recursions =", "torch.nn import torchvision.transforms as transforms import PIL def make_model(args, parent=False): return DRCN(args) class", "1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) ) # conv block of", "activation=None, norm=None) ) # initial w self.w_init = 
torch.ones(self.num_recursions) / self.num_recursions self.w =", "import torchvision.transforms as transforms import PIL def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module):", "<reponame>saeedizadi/EDSR-PyTorch import os import torch.nn as nn import torch.optim as optim from torch.autograd", "is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is", "self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True)", "PIL def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4,", "self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid()", "None: return self.act(out) else: return out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__()", "DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats = args.n_feats num_recursions", "= torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu':", "n_feats, 3, 1, 1, norm=None) ) # conv block of inference layer self.conv_block", "norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm =", "Variable import torch.nn import torchvision.transforms as transforms import PIL def make_model(args, parent=False): return", "= nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1,", "args.n_colors n_feats = args.n_feats num_recursions = 16 self.num_recursions = num_recursions # embedding layer", "__init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats = args.n_feats num_recursions = 16", "padding=1, bias=True, activation='relu', 
norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding,", "self.activation is not None: return self.act(out) else: return out class DRCN(torch.nn.Module): def __init__(self,", "out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection", "self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2,", "y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip", "3, 1, 1, norm=None) ) # conv block of inference layer self.conv_block =", "padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif", "nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1,", "optim from torch.autograd import Variable import torch.nn import torchvision.transforms as transforms import PIL", "Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding layer h0 = self.embedding_layer(x) # recursions", "= ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential(", "x): # embedding layer h0 = self.embedding_layer(x) # recursions h = [h0] for", "torch.autograd import Variable import torch.nn import torchvision.transforms as transforms import PIL def make_model(args,", "self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): #", "requires_grad=True) def forward(self, x): # embedding layer h0 = self.embedding_layer(x) # recursions h", "'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation", "embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, 
n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats, n_feats,", "self.embedding_layer(x) # recursions h = [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ =", "self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out =", "if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn =", "= torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if", "return self.act(out) else: return out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__() n_colors", "# embedding layer h0 = self.embedding_layer(x) # recursions h = [h0] for d", "def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2,", "self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm", "'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self,", "[] out_sum = 0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d])", "torchvision.transforms as transforms import PIL def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def", "out = self.conv(x) if self.activation is not None: return self.act(out) else: return out", "x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x)", "self.conv(x) if self.activation is not None: return self.act(out) else: return out class DRCN(torch.nn.Module):", "in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0 for d in range(self.num_recursions):", "if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act =", "class DRCN(torch.nn.Module): def 
__init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats = args.n_feats", "activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None) ) # initial w", "= torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x))", "from torch.autograd import Variable import torch.nn import torchvision.transforms as transforms import PIL def", "n_colors, 3, 1, 1, activation=None, norm=None) ) # initial w self.w_init = torch.ones(self.num_recursions)", "import PIL def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size,", "= torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding", "layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats, n_feats, 3,", ") # initial w self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True)", "d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0 for d in", "import torch.nn import torchvision.transforms as transforms import PIL def make_model(args, parent=False): return DRCN(args)", "nn import torch.optim as optim from torch.autograd import Variable import torch.nn import torchvision.transforms", "ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) ) # conv block of inference layer", "embedding layer h0 = self.embedding_layer(x) # recursions h = [h0] for d in", "self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x):", "norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn", "class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', 
norm='batch'): super(ConvBlock,", "torch.optim as optim from torch.autograd import Variable import torch.nn import torchvision.transforms as transforms", "'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation =", "num_recursions # embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None),", "conv block of inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None)", "layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) # reconstruction layer self.reconstruction_layer", "activation='relu', norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm", "os import torch.nn as nn import torch.optim as optim from torch.autograd import Variable", "self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3,", "elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act =", "'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out", "self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else:", "make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1,", "= 0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum =", "1, 1, norm=None) ) # conv block of inference layer self.conv_block = ConvBlock(n_feats,", "import torch.optim as optim from torch.autograd import Variable import torch.nn import torchvision.transforms as", "__init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', 
norm='batch'): super(ConvBlock, self).__init__() self.conv =", "import Variable import torch.nn import torchvision.transforms as transforms import PIL def make_model(args, parent=False):", "self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if", "block of inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) #", "== 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True)", "self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation", "activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act", "def forward(self, x): # embedding layer h0 = self.embedding_layer(x) # recursions h =", "1, 1, norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1,", "= [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0", "for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0 for d", "= activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu':", "input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size,", "stride=2, padding=1, bias=True, activation='relu', norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride,", "== 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def", "ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None,", "self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None), 
ConvBlock(n_feats, n_feats, 3, 1,", "= [] out_sum = 0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d],", "for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0", "out_sum = 0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum", "if self.activation is not None: return self.act(out) else: return out class DRCN(torch.nn.Module): def", "= torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if", "of inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) # reconstruction", "self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation", "self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation ==", "h0 = self.embedding_layer(x) # recursions h = [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d]))", "in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w)))", "def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'): super(ConvBlock, self).__init__() self.conv", "self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU()", "out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats =", "else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return", "self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation", "self.w = 
Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding layer h0 = self.embedding_layer(x)", "[h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0 for", "self.num_recursions = num_recursions # embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1,", "elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation ==", "self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation ==", "3, 1, 1, norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3,", "16 self.num_recursions = num_recursions # embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3,", "= args.n_colors n_feats = args.n_feats num_recursions = 16 self.num_recursions = num_recursions # embedding", "1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None) ) # initial", "= nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1,", "import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import", "transforms import PIL def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size,", "elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act =", "import os import torch.nn as nn import torch.optim as optim from torch.autograd import", "torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh':", "forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out =", "3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None) )", "bias=True, activation='relu', norm='batch'): 
super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)", "self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size)", "if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if", "self).__init__() n_colors = args.n_colors n_feats = args.n_feats num_recursions = 16 self.num_recursions = num_recursions", "1, norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1,", "output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn", "args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats = args.n_feats num_recursions = 16 self.num_recursions", "as transforms import PIL def make_model(args, parent=False): return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self,", "1, activation=None, norm=None) ) # initial w self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w", "out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return", "layer h0 = self.embedding_layer(x) # recursions h = [h0] for d in range(self.num_recursions):", "torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation", "# initial w self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def", "as optim from torch.autograd import Variable import torch.nn import torchvision.transforms as transforms import", "kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn =", "norm=None) ) # conv block of inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3,", "h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0 for d in range(self.num_recursions): 
y_d_.append(self.reconstruction_layer(h[d+1])) out_sum", "= torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation ==", "3, 1, 1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) ) # conv", "reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats,", "0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum,", "recursions h = [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum", "1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None) ) #", "torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch':", "range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum = 0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1]))", "elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act", "kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size,", "= args.n_feats num_recursions = 16 self.num_recursions = num_recursions # embedding layer self.embedding_layer =", "= torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection final_out = torch.add(out_sum, x) return", "ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'): super(ConvBlock, self).__init__()", "self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act =", "norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential( 
ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None,", "= norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance':", "torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid':", "self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm ==", "'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif", "not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not", "= torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif", "args.n_feats num_recursions = 16 self.num_recursions = num_recursions # embedding layer self.embedding_layer = nn.Sequential(", "= num_recursions # embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1,", "return DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu',", "norm=None) ) # initial w self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(),", "1, 1, activation=None, norm=None) ) # initial w self.w_init = torch.ones(self.num_recursions) / self.num_recursions", "y_d_ = [] out_sum = 0 for d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum +=", "torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection final_out =", "is not None: return self.act(out) else: return out class DRCN(torch.nn.Module): def __init__(self, args):", "+= torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection final_out", "torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) 
self.activation = activation if self.activation", "= torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm ==", "self.act(out) else: return out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__() n_colors =", "initial w self.w_init = torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self,", "= Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding layer h0 = self.embedding_layer(x) #", "# reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None),", "torch.ones(self.num_recursions) / self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding layer", "self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding layer h0 =", "# recursions h = [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = []", "'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif", "n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) ) #", "n_feats, 3, 1, 1, norm=None) # reconstruction layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats,", "else: return out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors", "elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is", "self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu':", "torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act", "torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection final_out 
= torch.add(out_sum, x) return final_out", "def __init__(self, args): super(DRCN, self).__init__() n_colors = args.n_colors n_feats = args.n_feats num_recursions =", "ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None) ) # initial w self.w_init =", "1, 1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) ) # conv block", "num_recursions = 16 self.num_recursions = num_recursions # embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors,", "3, 1, 1, activation=None, norm=None) ) # initial w self.w_init = torch.ones(self.num_recursions) /", "forward(self, x): # embedding layer h0 = self.embedding_layer(x) # recursions h = [h0]", "as nn import torch.optim as optim from torch.autograd import Variable import torch.nn import", "= 16 self.num_recursions = num_recursions # embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats,", "DRCN(args) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'):", "== 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act", "== 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh()", "'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act =", "= self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out)", "n_colors = args.n_colors n_feats = args.n_feats num_recursions = 16 self.num_recursions = num_recursions #", "inference layer self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) # reconstruction layer", "norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None) ) # initial w self.w_init", "self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection final_out = 
torch.add(out_sum,", "layer self.reconstruction_layer = nn.Sequential( ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors,", "nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None)", "d in range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 /", "True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act", "norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) ) # conv block of inference", "n_feats = args.n_feats num_recursions = 16 self.num_recursions = num_recursions # embedding layer self.embedding_layer", "def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out", "output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size,", "self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not", "out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) # skip connection final_out = torch.add(out_sum, x)", "torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm", "None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None:", "= self.embedding_layer(x) # recursions h = [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_", "= self.conv(x) if self.activation is not None: return self.act(out) else: return out class", "/ self.num_recursions self.w = Variable(self.w_init.cuda(), requires_grad=True) def forward(self, x): # embedding layer h0", "== 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif", "torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = 
self.bn(self.conv(x)) else:", "not None: return self.act(out) else: return out class DRCN(torch.nn.Module): def __init__(self, args): super(DRCN,", "# embedding layer self.embedding_layer = nn.Sequential( ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats,", "n_feats, 3, 1, 1, activation=None, norm=None), ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None)", "h = [h0] for d in range(self.num_recursions): h.append(self.conv_block(h[d])) y_d_ = [] out_sum =", "== 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation", "range(self.num_recursions): y_d_.append(self.reconstruction_layer(h[d+1])) out_sum += torch.mul(y_d_[d], self.w[d]) out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w))) #", "torch.nn as nn import torch.optim as optim from torch.autograd import Variable import torch.nn", "ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None), ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None) )", "1, norm=None) ) # conv block of inference layer self.conv_block = ConvBlock(n_feats, n_feats," ]
[]
[ "mode == 'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') else None) elif", "HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer", "gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self,", "import InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from", "import MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self, mode):", "if mode == 'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') else None)", "'name') else None) elif mode == 'rgb_array' or mode == 'depth_array': self.viewer =", "hasattr(self, 'name') else None) elif mode == 'rgb_array' or mode == 'depth_array': self.viewer", "else None) elif mode == 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim,", "elif mode == 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup()", "== 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer return self.viewer setattr(MujocoEnv,", "self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer = MjViewer(self.sim, self.name", "None: if mode == 'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') else", "_get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode == 'human':", "gym_ext.envs.inverted_pendulum_ext import InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py", "'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') 
else None) elif mode ==", "'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer return self.viewer setattr(MujocoEnv, '_get_viewer',", "gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt", "if self.viewer is None: if mode == 'human': self.viewer = MjViewer(self.sim, self.name if", "MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode", "InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext", "is None: if mode == 'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name')", "self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer return self.viewer setattr(MujocoEnv, '_get_viewer', _get_viewer)", "def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode ==", "mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode)", "import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt as", "== 'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') else None) elif mode", "self.name if hasattr(self, 'name') else None) elif mode == 'rgb_array' or mode ==", "from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer def", "None) elif mode == 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)", "or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] 
= self.viewer return", "mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer", "MujocoEnv import mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer", "mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer", "= 'DafniAntotsiou' from gym_ext.envs.inverted_pendulum_ext import InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import", "= self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer = MjViewer(self.sim,", "MjViewer(self.sim, self.name if hasattr(self, 'name') else None) elif mode == 'rgb_array' or mode", "__author__ = 'DafniAntotsiou' from gym_ext.envs.inverted_pendulum_ext import InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env", "from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import mujoco_py from mapping.mjviewerext import", "self.viewer = self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer =", "= MjViewer(self.sim, self.name if hasattr(self, 'name') else None) elif mode == 'rgb_array' or", "mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer return self.viewer", "self.viewer is None: if mode == 'human': self.viewer = MjViewer(self.sim, self.name if hasattr(self,", "if hasattr(self, 'name') else None) elif mode == 'rgb_array' or mode == 'depth_array':", "from mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if", "'DafniAntotsiou' from gym_ext.envs.inverted_pendulum_ext import InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from 
gym.envs.mujoco.mujoco_env import MujocoEnv", "mode == 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode]", "== 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] =", "MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None:", "from gym_ext.envs.inverted_pendulum_ext import InvertedPendulumEnvExt from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt from gym.envs.mujoco.mujoco_env import MujocoEnv import", "self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') else None) elif mode == 'rgb_array'", "as MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if", "import mujoco_py from mapping.mjviewerext import MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer =", "'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer", "import MjViewerExt as MjViewer def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is" ]
[ "as pd from time import asctime from omnipickle_manager import OmnipickleManager import global_settings as", "settings from helpyr import data_loading from helpyr import logger #from helpyr import crawler", "scan_name, sta_str, _ = gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-') # Calculate", "os.path.join(gsd_dir, gsf_name) # Both data files are read exactly the same, do in", "period, scan_length = scan_name.split('-') # Calculate experiment time based on step and period", ": 0.71, '1' : 1 , '1.4' : 1.41, '2' : 2 ,", "self.all_data) if __name__ == \"__main__\": # Run the script gsd_processor = GSDProcessor() gsd_processor.run()", "+ [scan_name, exp_time] for var_name, var_val in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data)", "crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths", "GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False)", "run_data = pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: # Load data and set", "set to handed 2m gsd # Doing it the quick way.... 
# Search", "for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def", "hm # currently set to handed 2m gsd # Doing it the quick", "#print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger)", "+ 60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time) # Generate name to", "not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart provided filepath to GrainSize.txt", "run_data = pd.concat([run_data, data], axis=1) # Add columns that will be later used", "= lambda l, d, t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling,", ", '1.4' : 1.41, '2' : 2 , '2.8' : 2.83, '4' :", "data col_conv = { '0.5' : 0.5 , '0.71' : 0.71, '1' :", "experiment time based on step and period is_falling = step[0] == 'f' discharge", "the same, do in a loop run_data = pd.DataFrame() for filepath in [gsd_filepath,", "= f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor", "Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt files.", "'1' : 1 , '1.4' : 1.41, '2' : 2 , '2.8' :", "'4' : 4 , '5.6' : 5.66, '8' : 8 , '11.3' :", "as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all the GrainSize.txt", "1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code, step, period,", "t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time) # Generate", "= { 'index_col' : None, 'header' : 0, 'skiprows' 
: [1], } run_data_frames", "float to make consistent with # sieve data col_conv = { '0.5' :", "l, d, t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time)", "{ '0.5' : 0.5 , '0.71' : 0.71, '1' : 1 , '1.4'", "to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size", "'1.4' : 1.41, '2' : 2 , '2.8' : 2.83, '4' : 4", "exp_time] for var_name, var_val in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add", "indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing", "consistent with # sieve data col_conv = { '0.5' : 0.5 , '0.71'", "crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end()", "c \\ for c in self.all_data.columns] def update_omnipickle(self): # Add gsd data to", "= settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger", "helpyr_misc as hm # currently set to handed 2m gsd # Doing it", "with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all", "f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all the GrainSize.txt files", "classes from string to float to make consistent with # sieve data col_conv", ": None, 
'header' : 0, 'skiprows' : [1], } run_data_frames = [] for", "gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data files are read", "= len(discharge_order) calc_time = lambda l, d, t: t + 60*(d + 2*l*(n_discharges-1-d))", "= gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-') # Calculate experiment time based", "gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-')", "same, do in a loop run_data = pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]:", "'2.8' : 2.83, '4' : 4 , '5.6' : 5.66, '8' : 8", "the distributed GrainSize.txt files. Updates the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination", "logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start up", "'8' : 8 , '11.3' : 11.2, '16' : 16 , '22.6' :", "self.all_data.columns = [col_conv[c] if c in col_conv else c \\ for c in", "GrainSize.txt files and combine gsd_txt_kwargs = { 'index_col' : None, 'header' : 0,", "loop run_data = pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: # Load data and", "0.5 , '0.71' : 0.71, '1' : 1 , '1.4' : 1.41, '2'", "used for a multiindex index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals =", "self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from string", "global_settings as settings from helpyr import data_loading from helpyr import logger #from helpyr", "#'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}')", "'22.6' : 22.3, '32' : 32 , } self.all_data.columns = [col_conv[c] if c", 
"find_gsd_txt_files(self): # Find all the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths", "import data_loading from helpyr import logger #from helpyr import crawler from helpyr import", "default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start up loader self.loader = data_loading.DataLoader(", "def load_data(self): # Load all the GrainSize.txt files and combine gsd_txt_kwargs = {", "Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = []", "Load all the GrainSize.txt files and combine gsd_txt_kwargs = { 'index_col' : None,", "GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\")", "calc_time = lambda l, d, t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time =", "fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data files", "and combine gsd_txt_kwargs = { 'index_col' : None, 'header' : 0, 'skiprows' :", "{gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all the GrainSize.txt files and combine gsd_txt_kwargs", "gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_')", "= discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda l, d, t: t +", "helpyr import helpyr_misc as hm # currently set to handed 2m gsd #", "= os.path.join(gsd_dir, gsf_name) # Both data files are read exactly the same, do", "= crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", 
verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt'", "'header' : 0, 'skiprows' : [1], } run_data_frames = [] for gsd_filepath in", "self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle,", "2 , '2.8' : 2.83, '4' : 4 , '5.6' : 5.66, '8'", "currently set to handed 2m gsd # Doing it the quick way.... #", "def find_gsd_txt_files(self): # Find all the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root)", "'32' : 32 , } self.all_data.columns = [col_conv[c] if c in col_conv else", "discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda l, d, t: t", "[exp_code, step, period, sta_str, scan_length] var_names = index_names + ['scan_name', 'exp_time'] var_vals =", "update_omnipickle(self): # Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ ==", "for a multiindex index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code,", "[] for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue #", "c in col_conv else c \\ for c in self.all_data.columns] def update_omnipickle(self): #", "run_multiindex run_data = pd.concat([run_data, data], axis=1) # Add columns that will be later", "= var_val run_data_frames.append(run_data) # Add data to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True)", "'0.5' : 0.5 , '0.71' : 0.71, '1' : 1 , '1.4' :", 
"#crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path", "period_time) # Generate name to grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath", "= gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code, step, period, scan_length =", "are read exactly the same, do in a loop run_data = pd.DataFrame() for", "sta_str, scan_length] var_names = index_names + ['scan_name', 'exp_time'] var_vals = index_vals + [scan_name,", "files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) #", "Add columns that will be later used for a multiindex index_names = ['exp_code',", "pd.concat([run_data, data], axis=1) # Add columns that will be later used for a", "for var_name, var_val in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add data", "after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self):", "as hm # currently set to handed 2m gsd # Doing it the", "omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the GrainSize.txt files self.logger.write(\"\") #crawler", "to get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0]", "data files are read exactly the same, do in a loop run_data =", "gsd # 
Doing it the quick way.... # Search for: self.gsd_txt_filepaths = class", "GrainSize.txt files. Updates the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir", "100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda l, d, t:", "columns that will be later used for a multiindex index_names = ['exp_code', 'step',", "Updates the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath =", "# Add columns that will be later used for a multiindex index_names =", "import numpy as np import pandas as pd from time import asctime from", "self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example", "self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger", "name to grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name)", "= int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50, 62, 75, 87, 100] discharge_index", "= run_multiindex run_data = pd.concat([run_data, data], axis=1) # Add columns that will be", "import pandas as pd from time import asctime from omnipickle_manager import OmnipickleManager import", "import os import numpy as np import pandas as pd from time import", "file {gsd_filepath}\") continue # Pull apart provided filepath to GrainSize.txt to get run", "= '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', 
'/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths:", "self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function = self.logger.run_indented_function", "+ ['scan_name', 'exp_time'] var_vals = index_vals + [scan_name, exp_time] for var_name, var_val in", "gsd_txt_kwargs = { 'index_col' : None, 'header' : 0, 'skiprows' : [1], }", "\\ for c in self.all_data.columns] def update_omnipickle(self): # Add gsd data to omnipickle", "from helpyr import data_loading from helpyr import logger #from helpyr import crawler from", "run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str,", "'f' discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50, 62, 75, 87,", "that will be later used for a multiindex index_names = ['exp_code', 'step', 'period',", "run_data[var_name] = var_val run_data_frames.append(run_data) # Add data to combined data self.all_data = pd.concat(run_data_frames,", "from time import asctime from omnipickle_manager import OmnipickleManager import global_settings as settings from", "var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add data to combined data self.all_data =", "open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all the", "self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD", "Find all the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files(", 
"logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start up loader self.loader =", "and period is_falling = step[0] == 'f' discharge = int(step[1:-1]) period_time = int(period[1:])", "size classes from string to float to make consistent with # sieve data", "Both data files are read exactly the same, do in a loop run_data", "self.all_data.sort_index(inplace=True) # Convert size classes from string to float to make consistent with", "merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath)", "after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the", "= [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as", ", '2.8' : 2.83, '4' : 4 , '5.6' : 5.66, '8' :", "asctime()]) # Start up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager", ": 8 , '11.3' : 11.2, '16' : 16 , '22.6' : 22.3,", "2.83, '4' : 4 , '5.6' : 5.66, '8' : 8 , '11.3'", "8 , '11.3' : 11.2, '16' : 16 , '22.6' : 22.3, '32'", "self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files,", 
"period_time = int(period[1:]) discharge_order = [50, 62, 75, 87, 100] discharge_index = discharge_order.index(discharge)", "combines the distributed GrainSize.txt files. Updates the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir", "var_val in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add data to combined", "handed 2m gsd # Doing it the quick way.... # Search for: self.gsd_txt_filepaths", "# example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt',", "'scan_length'] index_vals = [exp_code, step, period, sta_str, scan_length] var_names = index_names + ['scan_name',", "'2' : 2 , '2.8' : 2.83, '4' : 4 , '5.6' :", "'0.71' : 0.71, '1' : 1 , '1.4' : 1.41, '2' : 2", "multiindex index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code, step, period,", "['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code, step, period, sta_str, scan_length] var_names", "files are read exactly the same, do in a loop run_data = pd.DataFrame()", "for filepath in [gsd_filepath, gsf_filepath]: # Load data and set the index label", "settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger =", "self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data, data], axis=1) # Add", "import logger #from helpyr import crawler from helpyr import helpyr_misc as hm #", "string to float to make consistent with # sieve data col_conv = {", "= int(period[1:]) discharge_order = [50, 62, 75, 87, 100] discharge_index = 
discharge_order.index(discharge) n_discharges", "self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart provided filepath to GrainSize.txt to get", "# Pull apart provided filepath to GrainSize.txt to get run info gsd_dir, gsd_name", "data and set the index label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index =", "[50, 62, 75, 87, 100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time =", "in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add data to combined data", "data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\")", "col_conv else c \\ for c in self.all_data.columns] def update_omnipickle(self): # Add gsd", "4 , '5.6' : 5.66, '8' : 8 , '11.3' : 11.2, '16'", "Processor output\", asctime()]) # Start up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) #", "indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the GrainSize.txt", "= OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function", "pd from time import asctime from omnipickle_manager import OmnipickleManager import global_settings as settings", "axis=1) # Add columns that will be later used for a multiindex index_names", "data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\": # Run the", 
"ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from string to float to", "sieve data col_conv = { '0.5' : 0.5 , '0.71' : 0.71, '1'", "# Load all the GrainSize.txt files and combine gsd_txt_kwargs = { 'index_col' :", "step, period, scan_length = scan_name.split('-') # Calculate experiment time based on step and", "= f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data files are read exactly", "= pd.concat([run_data, data], axis=1) # Add columns that will be later used for", "= step[0] == 'f' discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50,", "= data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment", "up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start", "'11.3' : 11.2, '16' : 16 , '22.6' : 22.3, '32' : 32", ": 2 , '2.8' : 2.83, '4' : 4 , '5.6' : 5.66,", "period, sta_str, scan_length] var_names = index_names + ['scan_name', 'exp_time'] var_vals = index_vals +", "gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data, data], axis=1) # Add columns", "the GrainSize.txt files and combine gsd_txt_kwargs = { 'index_col' : None, 'header' :", "a multiindex index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code, step,", "class GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt files. 
Updates the omnipickle.\"\"\" def", "{ 'index_col' : None, 'header' : 0, 'skiprows' : [1], } run_data_frames =", "1 , '1.4' : 1.41, '2' : 2 , '2.8' : 2.83, '4'", "# Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()])", "#self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path =", "gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with open(gsd_list_path)", "self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self):", "to grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) #", "= self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data, data], axis=1) #", "hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\": # Run the script gsd_processor =", "#data.index = run_multiindex run_data = pd.concat([run_data, data], axis=1) # Add columns that will", "combine gsd_txt_kwargs = { 'index_col' : None, 'header' : 0, 'skiprows' : [1],", "do in a loop run_data = pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: #", "#crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', 
'/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path", "'/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading", "pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: # Load data and set the index", "import crawler from helpyr import helpyr_misc as hm # currently set to handed", "data], axis=1) # Add columns that will be later used for a multiindex", "to float to make consistent with # sieve data col_conv = { '0.5'", "def __init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start", "in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart provided", ", '22.6' : 22.3, '32' : 32 , } self.all_data.columns = [col_conv[c] if", "Collects combines the distributed GrainSize.txt files. 
Updates the omnipickle.\"\"\" def __init__(self): self.root =", "sta_str, _ = gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-') # Calculate experiment", "in [gsd_filepath, gsf_filepath]: # Load data and set the index label data =", "the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\"", "self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from string to float to make", "'sta_str', 'scan_length'] index_vals = [exp_code, step, period, sta_str, scan_length] var_names = index_names +", "hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code, step,", "read exactly the same, do in a loop run_data = pd.DataFrame() for filepath", "numpy as np import pandas as pd from time import asctime from omnipickle_manager", "filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ]", "get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name,", "'/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with", "var_val run_data_frames.append(run_data) # Add data to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) 
self.all_data.set_index(index_names,", "Add data to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) #", "run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging", ": 1 , '1.4' : 1.41, '2' : 2 , '2.8' : 2.83,", "# Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\":", "logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths", "gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self):", "it the quick way.... 
# Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects", "data_loading from helpyr import logger #from helpyr import crawler from helpyr import helpyr_misc", "index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code, step, period, sta_str,", "helpyr import crawler from helpyr import helpyr_misc as hm # currently set to", "pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from string to float", "omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\": # Run the script gsd_processor", "} self.all_data.columns = [col_conv[c] if c in col_conv else c \\ for c", "# Doing it the quick way.... # Search for: self.gsd_txt_filepaths = class GSDProcessor:", "calc_time(is_falling, discharge_index, period_time) # Generate name to grain size fraction file gsf_name =", "== 'f' discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50, 62, 75,", "and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\")", "# Start up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager", "self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart provided filepath", "file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data files are", "Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, 
self.all_data) if __name__ == \"__main__\": #", "int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50, 62, 75, 87, 100] discharge_index =", "after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}},", "f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data files are read exactly the", "before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find", "the index label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data =", "index label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data,", "os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart provided filepath to GrainSize.txt to", ", } self.all_data.columns = [col_conv[c] if c in col_conv else c \\ for", "= index_vals + [scan_name, exp_time] for var_name, var_val in zip(var_names, var_vals): run_data[var_name] =", "self.all_data.columns] def update_omnipickle(self): # Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if", "1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-') #", "will be later used for a multiindex index_names = ['exp_code', 'step', 'period', 'sta_str',", "= pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: # Load 
data and set the", "f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\",", "#!/usr/bin/env python3 import os import numpy as np import pandas as pd from", "['scan_name', 'exp_time'] var_vals = index_vals + [scan_name, exp_time] for var_name, var_val in zip(var_names,", "0.71, '1' : 1 , '1.4' : 1.41, '2' : 2 , '2.8'", "helpyr import logger #from helpyr import crawler from helpyr import helpyr_misc as hm", "Doing it the quick way.... # Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\"", "filepath to GrainSize.txt to get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name", "self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start up loader", "\"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [", ": 1.41, '2' : 2 , '2.8' : 2.83, '4' : 4 ,", "[scan_name, exp_time] for var_name, var_val in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) #", "22.3, '32' : 32 , } self.all_data.columns = [col_conv[c] if c in col_conv", ", '11.3' : 11.2, '16' : 16 , '22.6' : 22.3, '32' :", ": 22.3, '32' : 32 , } self.all_data.columns = [col_conv[c] if c in", "= logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start up loader self.loader", "run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\")", "discharge_order = [50, 62, 75, 87, 100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order)", "'period', 'sta_str', 
'scan_length'] index_vals = [exp_code, step, period, sta_str, scan_length] var_names = index_names", "75, 87, 100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda l,", "2m gsd # Doing it the quick way.... # Search for: self.gsd_txt_filepaths =", "grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both", ": [1], } run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath):", "# Generate name to grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath =", "= calc_time(is_falling, discharge_index, period_time) # Generate name to grain size fraction file gsf_name", "combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes", "OmnipickleManager import global_settings as settings from helpyr import data_loading from helpyr import logger", "{gsd_filepath}\") continue # Pull apart provided filepath to GrainSize.txt to get run info", "'skiprows' : [1], } run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths: if not", "Convert size classes from string to float to make consistent with # sieve", ": 0, 'skiprows' : [1], } run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths:", "= [col_conv[c] if c in col_conv else c \\ for c in self.all_data.columns]", "60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time) # Generate name to grain", "pandas as pd from time import asctime from omnipickle_manager import OmnipickleManager import global_settings", ": 2.83, '4' : 4 , '5.6' : 5.66, '8' : 8 ,", "run_data_frames.append(run_data) # Add data to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True)", 
"output\", asctime()]) # Start up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload", "before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the GrainSize.txt files self.logger.write(\"\")", "= [50, 62, 75, 87, 100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time", "self.logger.write([\"Begin GSD Processor output\", asctime()]) # Start up loader self.loader = data_loading.DataLoader( self.pickle_destination,", "in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): #", "make consistent with # sieve data col_conv = { '0.5' : 0.5 ,", ": 0.5 , '0.71' : 0.71, '1' : 1 , '1.4' : 1.41,", "self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger = logger.Logger(self.log_filepath,", "loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore()", "before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\",", "scan_length] var_names = index_names + ['scan_name', 'exp_time'] var_vals = index_vals + [scan_name, exp_time]", "be later used for a multiindex index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length']", "int(period[1:]) discharge_order = [50, 62, 75, 87, 100] discharge_index = discharge_order.index(discharge) n_discharges =", "step and period is_falling = step[0] == 'f' discharge = int(step[1:-1]) period_time =", "verbose_file_list=False) 
# example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt',", "var_name, var_val in zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add data to", "discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda l, d, t: t + 60*(d", "'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code, step, period, sta_str, scan_length] var_names =", "Start up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager =", "Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin GSD Processor output\", asctime()]) #", "for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull", "add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data, data], axis=1) # Add columns that", "step[0] == 'f' discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50, 62,", "data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data, data], axis=1)", "data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def", "var_vals = index_vals + [scan_name, exp_time] for var_name, var_val in zip(var_names, var_vals): run_data[var_name]", "= [] def run(self): indent_function = self.logger.run_indented_function 
indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data,", "None, 'header' : 0, 'skiprows' : [1], } run_data_frames = [] for gsd_filepath", "way.... # Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines the distributed", "62, 75, 87, 100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda", "'5.6' : 5.66, '8' : 8 , '11.3' : 11.2, '16' : 16", "import asctime from omnipickle_manager import OmnipickleManager import global_settings as settings from helpyr import", "is_falling = step[0] == 'f' discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order =", "0, 'skiprows' : [1], } run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths: if", "# Calculate experiment time based on step and period is_falling = step[0] ==", "11.2, '16' : 16 , '22.6' : 22.3, '32' : 32 , }", "time import asctime from omnipickle_manager import OmnipickleManager import global_settings as settings from helpyr", "apart provided filepath to GrainSize.txt to get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath,", "with # sieve data col_conv = { '0.5' : 0.5 , '0.71' :", ": 4 , '5.6' : 5.66, '8' : 8 , '11.3' : 11.2,", "'index_col' : None, 'header' : 0, 'skiprows' : [1], } run_data_frames = []", "example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups'", "Pull apart provided filepath to GrainSize.txt to get run info gsd_dir, gsd_name =", "self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all the GrainSize.txt files and combine gsd_txt_kwargs =", "all the 
GrainSize.txt files and combine gsd_txt_kwargs = { 'index_col' : None, 'header'", "# Both data files are read exactly the same, do in a loop", "#gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in", "to make consistent with # sieve data col_conv = { '0.5' : 0.5", "87, 100] discharge_index = discharge_order.index(discharge) n_discharges = len(discharge_order) calc_time = lambda l, d,", "index_names + ['scan_name', 'exp_time'] var_vals = index_vals + [scan_name, exp_time] for var_name, var_val", "omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" #", ", '5.6' : 5.66, '8' : 8 , '11.3' : 11.2, '16' :", "zip(var_names, var_vals): run_data[var_name] = var_val run_data_frames.append(run_data) # Add data to combined data self.all_data", "if c in col_conv else c \\ for c in self.all_data.columns] def update_omnipickle(self):", "'16' : 16 , '22.6' : 22.3, '32' : 32 , } self.all_data.columns", "to handed 2m gsd # Doing it the quick way.... 
# Search for:", "# Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths =", "later used for a multiindex index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals", "import OmnipickleManager import global_settings as settings from helpyr import data_loading from helpyr import", "3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths = [ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for", "as np import pandas as pd from time import asctime from omnipickle_manager import", "5.66, '8' : 8 , '11.3' : 11.2, '16' : 16 , '22.6'", "quick way.... 
# Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines the", "time based on step and period is_falling = step[0] == 'f' discharge =", "# Find all the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths =", "data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from", "exp_code, step, period, scan_length = scan_name.split('-') # Calculate experiment time based on step", "= settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True)", "Generate name to grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir,", "index_vals + [scan_name, exp_time] for var_name, var_val in zip(var_names, var_vals): run_data[var_name] = var_val", "# currently set to handed 2m gsd # Doing it the quick way....", "gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load", ": 11.2, '16' : 16 , '22.6' : 22.3, '32' : 32 ,", "provided filepath to GrainSize.txt to get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1)", ": 16 , '22.6' : 22.3, '32' : 32 , } self.all_data.columns =", "omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all", "Calculate experiment time based on step and period is_falling = step[0] == 'f'", "self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def 
load_data(self): # Load all the GrainSize.txt files and", "in self.all_data.columns] def update_omnipickle(self): # Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data)", "files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store,", "to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\": # Run the script", "] for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines())", "np import pandas as pd from time import asctime from omnipickle_manager import OmnipickleManager", "def update_omnipickle(self): # Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__", "gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart", "= scan_name.split('-') # Calculate experiment time based on step and period is_falling =", "as settings from helpyr import data_loading from helpyr import logger #from helpyr import", "from omnipickle_manager import OmnipickleManager import global_settings as settings from helpyr import data_loading from", "based on step and period is_falling = step[0] == 'f' discharge = int(step[1:-1])", "if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue # Pull apart provided filepath to", "gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code, step, period, scan_length", 
"continue # Pull apart provided filepath to GrainSize.txt to get run info gsd_dir,", "2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time) # Generate name to grain size fraction", "self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\",", "experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding", "def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and", "info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _", "label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data = pd.concat([run_data, data],", "omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def", "kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the GrainSize.txt files", "period is_falling = step[0] == 'f' discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order", "self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions()", "scan_name.split('-') # 
Calculate experiment time based on step and period is_falling = step[0]", "gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data files are read exactly the same,", "and set the index label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex", "files and combine gsd_txt_kwargs = { 'index_col' : None, 'header' : 0, 'skiprows'", "[col_conv[c] if c in col_conv else c \\ for c in self.all_data.columns] def", "#from helpyr import crawler from helpyr import helpyr_misc as hm # currently set", "to GrainSize.txt to get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name =", "load_data(self): # Load all the GrainSize.txt files and combine gsd_txt_kwargs = { 'index_col'", ": 5.66, '8' : 8 , '11.3' : 11.2, '16' : 16 ,", "in col_conv else c \\ for c in self.all_data.columns] def update_omnipickle(self): # Add", "= crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt", "# sieve data col_conv = { '0.5' : 0.5 , '0.71' : 0.71,", "16 , '22.6' : 22.3, '32' : 32 , } self.all_data.columns = [col_conv[c]", "'exp_time'] var_vals = index_vals + [scan_name, exp_time] for var_name, var_val in zip(var_names, var_vals):", "'/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f: self.logger.write(f'Reading from", "32 , } self.all_data.columns = [col_conv[c] if c in col_conv else c \\", "all the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( #", "a loop run_data = pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: # Load data", "t + 60*(d + 2*l*(n_discharges-1-d)) exp_time 
= calc_time(is_falling, discharge_index, period_time) # Generate name", ", '0.71' : 0.71, '1' : 1 , '1.4' : 1.41, '2' :", "_ = gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-') # Calculate experiment time", "from {gsd_list_path}') self.gsd_txt_filepaths.extend(f.read().splitlines()) def load_data(self): # Load all the GrainSize.txt files and combine", "# \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt #crawler.end() #gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt' gsd_list_paths =", "self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt files. Updates the", "+ 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time) # Generate name to grain size", "# Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt", "#crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\", verbose_file_list=False) # example filename:", "\"\"\" Collects combines the distributed GrainSize.txt files. 
Updates the omnipickle.\"\"\" def __init__(self): self.root", "self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating", "helpyr import data_loading from helpyr import logger #from helpyr import crawler from helpyr", "self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\": # Run the script gsd_processor = GSDProcessor()", "filepath in [gsd_filepath, gsf_filepath]: # Load data and set the index label data", "the GrainSize.txt files self.logger.write(\"\") #crawler = crawler.Crawler(logger=self.logger) #crawler.set_root(self.root) #self.gsd_txt_filepaths = crawler.get_target_files( # \"??-*L-t??-8m_sta-*_GrainSize.txt\",", "files. Updates the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath", "in a loop run_data = pd.DataFrame() for filepath in [gsd_filepath, gsf_filepath]: # Load", "definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize", "after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): # Find all the GrainSize.txt files self.logger.write(\"\") #crawler =", "discharge = int(step[1:-1]) period_time = int(period[1:]) discharge_order = [50, 62, 75, 87, 100]", "for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt files. 
Updates", "} run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file", "gsd_name.split('_') exp_code, step, period, scan_length = scan_name.split('-') # Calculate experiment time based on", "col_conv = { '0.5' : 0.5 , '0.71' : 0.71, '1' : 1", "<gh_stars>0 #!/usr/bin/env python3 import os import numpy as np import pandas as pd", "import global_settings as settings from helpyr import data_loading from helpyr import logger #from", "GrainSize.txt to get run info gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.',", "__init__(self): self.root = settings.cart_data_dir self.pickle_destination = settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up", "scan_length = scan_name.split('-') # Calculate experiment time based on step and period is_falling", "# Convert size classes from string to float to make consistent with #", "[ '/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt', '/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt', #'/home/alex/feed-timing/data/cart/gsd_from_backups' ] for gsd_list_path in gsd_list_paths: with open(gsd_list_path) as f:", "GSD Processor output\", asctime()]) # Start up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger)", "indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating", "= [] for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): self.logger.write(f\"Missing file {gsd_filepath}\") continue", "from helpyr import logger #from helpyr import crawler from helpyr import helpyr_misc as", "exp_time = calc_time(is_falling, discharge_index, period_time) # Generate name to grain 
size fraction file", "import helpyr_misc as hm # currently set to handed 2m gsd # Doing", "Load data and set the index label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index", "before_msg=\"Loading and merging data\", after_msg=\"Finished!\") indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\",", "gsf_filepath]: # Load data and set the index label data = self.loader.load_txt(filepath, gsd_txt_kwargs,", "else c \\ for c in self.all_data.columns] def update_omnipickle(self): # Add gsd data", "# Add data to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True)", "= hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _ = gsd_name.split('_') exp_code,", "python3 import os import numpy as np import pandas as pd from time", "size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\" gsf_filepath = os.path.join(gsd_dir, gsf_name) # Both data", "= index_names + ['scan_name', 'exp_time'] var_vals = index_vals + [scan_name, exp_time] for var_name,", "set the index label data = self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False) #data.index = run_multiindex run_data", "lambda l, d, t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index,", "[gsd_filepath, gsf_filepath]: # Load data and set the index label data = self.loader.load_txt(filepath,", "indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\",", "[1], } run_data_frames = [] for gsd_filepath in self.gsd_txt_filepaths: if not os.path.isfile(gsd_filepath): 
self.logger.write(f\"Missing", "var_names = index_names + ['scan_name', 'exp_time'] var_vals = index_vals + [scan_name, exp_time] for", "= ['exp_code', 'step', 'period', 'sta_str', 'scan_length'] index_vals = [exp_code, step, period, sta_str, scan_length]", "= class GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt files. Updates the omnipickle.\"\"\"", "[] def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading", "on step and period is_falling = step[0] == 'f' discharge = int(step[1:-1]) period_time", "1.41, '2' : 2 , '2.8' : 2.83, '4' : 4 , '5.6'", "exactly the same, do in a loop run_data = pd.DataFrame() for filepath in", "step, period, sta_str, scan_length] var_names = index_names + ['scan_name', 'exp_time'] var_vals = index_vals", "c in self.all_data.columns] def update_omnipickle(self): # Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir,", "discharge_index, period_time) # Generate name to grain size fraction file gsf_name = f\"{scan_name}_{sta_str}_GrainSizeFractions.txt\"", "crawler from helpyr import helpyr_misc as hm # currently set to handed 2m", "self.gsd_txt_filepaths = [] def run(self): indent_function = self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\")", "n_discharges = len(discharge_order) calc_time = lambda l, d, t: t + 60*(d +", "= [exp_code, step, period, sta_str, scan_length] var_names = index_names + ['scan_name', 'exp_time'] var_vals", "logger #from helpyr import crawler from helpyr import helpyr_misc as hm # currently", "gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1) gsd_name = gsd_name.split('.', 1)[0] scan_name, sta_str, _ =", "# Load data and set the index label data = 
self.loader.load_txt(filepath, gsd_txt_kwargs, add_path=False)", "= { '0.5' : 0.5 , '0.71' : 0.71, '1' : 1 ,", "data to combined data self.all_data = pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert", "inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from string to float to make consistent", "indent_function(self.update_omnipickle, before_msg=\"Updating omnipickle\", after_msg=\"Finished!\") indent_function(self.omnimanager.store, kwargs={'overwrite':{'gsd':True}}, before_msg=\"Storing omnipickle\", after_msg=\"Finished!\") #print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath) def find_gsd_txt_files(self): #", "up loader self.loader = data_loading.DataLoader( self.pickle_destination, logger=self.logger) # Reload omnimanager self.omnimanager = OmnipickleManager(self.logger)", "settings.cart_pickles_dir self.log_filepath = f\"{settings.log_dir}/gsd_processor.txt\" # Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.write([\"Begin", "index_vals = [exp_code, step, period, sta_str, scan_length] var_names = index_names + ['scan_name', 'exp_time']", "from string to float to make consistent with # sieve data col_conv =", "gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir) self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data) if __name__ == \"__main__\": # Run", "from helpyr import helpyr_misc as hm # currently set to handed 2m gsd", "asctime from omnipickle_manager import OmnipickleManager import global_settings as settings from helpyr import data_loading", "GSDProcessor: \"\"\" Collects combines the distributed GrainSize.txt files. 
Updates the omnipickle.\"\"\" def __init__(self):", "omnipickle_manager import OmnipickleManager import global_settings as settings from helpyr import data_loading from helpyr", "len(discharge_order) calc_time = lambda l, d, t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time", "= pd.concat(run_data_frames, ignore_index=True) self.all_data.set_index(index_names, inplace=True) self.all_data.sort_index(inplace=True) # Convert size classes from string to", "d, t: t + 60*(d + 2*l*(n_discharges-1-d)) exp_time = calc_time(is_falling, discharge_index, period_time) #", "= self.logger.run_indented_function indent_function(self.find_gsd_txt_files, before_msg=\"Finding GrainSize files\", after_msg=\"Finished!\") indent_function(self.load_data, before_msg=\"Loading and merging data\", after_msg=\"Finished!\")", "OmnipickleManager(self.logger) self.omnimanager.restore() self.logger.write(\"Updating experiment definitions\") self.omnimanager.update_tree_definitions() self.gsd_txt_filepaths = [] def run(self): indent_function =", ": 32 , } self.all_data.columns = [col_conv[c] if c in col_conv else c", "os import numpy as np import pandas as pd from time import asctime", "gsf_name) # Both data files are read exactly the same, do in a", "the quick way.... # Search for: self.gsd_txt_filepaths = class GSDProcessor: \"\"\" Collects combines", "distributed GrainSize.txt files. Updates the omnipickle.\"\"\" def __init__(self): self.root = settings.cart_data_dir self.pickle_destination =", "for c in self.all_data.columns] def update_omnipickle(self): # Add gsd data to omnipickle hm.ensure_dir_exists(settings.cart_pickles_dir)" ]
[ "class DataReader(object): \"\"\"the data reader class. a reader for data. Data is not", "segments. The returned utt_info, does not ' \\ 'contain the _part sufix and", "open_fn = gzip.open else: open_fn = open f = open_fn(datafile) datalines = f.readlines()", "the datafile list Args: list_pos: position on the datafile list to read Returns:", "if datafile[-3:] == '.gz': open_fn = gzip.open else: open_fn = open f =", "break # split the name and the data line splitline = line.strip().split(' ')", "as a numpy array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if", "done in data.py. Data is returned in numpy format and is accessed by", "looping over all data. It is currently only used in postprocessing. \"\"\" def", "in postprocessing. \"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database", "expecting only 1 file, but this also makes sense? if datafile[-3:] == '.gz':", "# create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines datafile =", "of the utterance\"\"\" line = self.datafile_lines[list_pos] # split the name and the data", "only 1 processed') self.segment_lengths = segment_lengths self.processors = [] self.start_index_set = [0] self.datafile_lines", "as was done in data.py. Data is returned in numpy format and is", "TODO: for the moment expecting only 1 file, but this also makes sense?", "name of the utterance for the given position from the datafile list Args:", "open_fn = open f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self,", "file, but this also makes sense? 
if datafile[-3:] == '.gz': open_fn = gzip.open", "not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create", "and the data line splitline = line.strip().split(' ') utt_name = splitline[0] return utt_name", "= gzip.open else: open_fn = open f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines))", "# TODO: for the moment expecting only 1 file, but this also makes", "position on the datafile list to read Returns: The processed data as a", "all data. It is currently only used in postprocessing. \"\"\" def __init__(self, dataconfs,", "dataconfs: the database configuration segment_lengths: A list containing the desired lengths of segments.", "numpy array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if start_index >", "\"\"\"read data from the datafile list Args: list_pos: position on the datafile list", "import gzip import os class DataReader(object): \"\"\"the data reader class. a reader for", "BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor'))", "os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg", "processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines datafile = dataconf['datafiles'] # TODO:", "class for data\"\"\" from six.moves import configparser from nabu.processing.processors import processor_factory import gzip", "processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get the name of the utterance for", "containing the desired lengths of segments. 
Possibly multiple segment lengths \"\"\" if len(segment_lengths)", "splitline[0] dataline = ' '.join(splitline[1:]) # process the dataline processed, utt_info = processor(dataline)", "to read Returns: The processed data as a numpy array\"\"\" line = self.datafile_lines[list_pos]", "sufix and processed returns only 1 processed') self.segment_lengths = segment_lengths self.processors = []", "dataconf in dataconfs: # read the processor config proc_cfg_file = dataconf['processor_config'] if not", "= dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg", "self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if start_index > list_pos: processor = self.processors[ind-1]", "list_pos): \"\"\" get the name of the utterance for the given position from", "a numpy array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if start_index", "the name of the utterance for the given position from the datafile list", "[] for dataconf in dataconfs: # read the processor config proc_cfg_file = dataconf['processor_config']", "if len(segment_lengths) > 1: print( 'Warning: Not yet implemented __call__ correctly for multiple", "processed returns only 1 processed') self.segment_lengths = segment_lengths self.processors = [] self.start_index_set =", "ind, start_index in enumerate(self.start_index_set): if start_index > list_pos: processor = self.processors[ind-1] break #", "database configuration segment_lengths: A list containing the desired lengths of segments. Possibly multiple", "proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg,", "segments. 
Possibly multiple segment lengths \"\"\" if len(segment_lengths) > 1: print( 'Warning: Not", "= [0] self.datafile_lines = [] for dataconf in dataconfs: # read the processor", "> list_pos: processor = self.processors[ind-1] break # split the name and the data", "a reader for data. Data is not stored in tensorflow format as was", "splitline = line.strip().split(' ') utt_name = splitline[0] dataline = ' '.join(splitline[1:]) # process", "read Returns: The name of the utterance\"\"\" line = self.datafile_lines[list_pos] # split the", "line = self.datafile_lines[list_pos] # split the name and the data line splitline =", "the name and the data line splitline = line.strip().split(' ') utt_name = splitline[0]", "= processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get the name of", "does not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) #", "format as was done in data.py. Data is returned in numpy format and", "raise BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg =", "by indexing instead of looping over all data. It is currently only used", "processor = self.processors[ind-1] break # split the name and the data line splitline", "only 1 file, but this also makes sense? if datafile[-3:] == '.gz': open_fn", "from nabu.processing.processors import processor_factory import gzip import os class DataReader(object): \"\"\"the data reader", "Args: list_pos: position on the datafile list to read Returns: The processed data", "list containing the desired lengths of segments. Possibly multiple segment lengths \"\"\" if", "line splitline = line.strip().split(' ') utt_name = splitline[0] dataline = ' '.join(splitline[1:]) #", "__call__ correctly for multiple segments. 
The returned utt_info, does not ' \\ 'contain", "1 processed! processed = processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get", "processed = processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get the name", "list Args: list_pos: position on the datafile list to read Returns: The name", "data.py. Data is returned in numpy format and is accessed by indexing instead", "list_pos): \"\"\"read data from the datafile list Args: list_pos: position on the datafile", "Not yet implemented __call__ correctly for multiple segments. The returned utt_info, does not", "postprocessing. \"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database configuration", "dataline = ' '.join(splitline[1:]) # process the dataline processed, utt_info = processor(dataline) utt_info['utt_name']", "contains a reader class for data\"\"\" from six.moves import configparser from nabu.processing.processors import", "gzip import os class DataReader(object): \"\"\"the data reader class. a reader for data.", "currently only used in postprocessing. \"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args:", "parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the", "\"\"\" if len(segment_lengths) > 1: print( 'Warning: Not yet implemented __call__ correctly for", "not ' \\ 'contain the _part sufix and processed returns only 1 processed')", "'Warning: Not yet implemented __call__ correctly for multiple segments. 
The returned utt_info, does", "def __call__(self, list_pos): \"\"\"read data from the datafile list Args: list_pos: position on", "configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get", "data as a numpy array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set):", "the moment expecting only 1 file, but this also makes sense? if datafile[-3:]", "multiple segments. The returned utt_info, does not ' \\ 'contain the _part sufix", "[] self.start_index_set = [0] self.datafile_lines = [] for dataconf in dataconfs: # read", "1: print( 'Warning: Not yet implemented __call__ correctly for multiple segments. The returned", "format and is accessed by indexing instead of looping over all data. It", "create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines datafile = dataconf['datafiles']", "= dataconf['datafiles'] # TODO: for the moment expecting only 1 file, but this", "makes sense? if datafile[-3:] == '.gz': open_fn = gzip.open else: open_fn = open", "is accessed by indexing instead of looping over all data. 
It is currently", "for dataconf in dataconfs: # read the processor config proc_cfg_file = dataconf['processor_config'] if", "[0] self.datafile_lines = [] for dataconf in dataconfs: # read the processor config", "utt_name = splitline[0] dataline = ' '.join(splitline[1:]) # process the dataline processed, utt_info", "= [] self.start_index_set = [0] self.datafile_lines = [] for dataconf in dataconfs: #", "dataline processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name # Currently only returning 1", "datafile = dataconf['datafiles'] # TODO: for the moment expecting only 1 file, but", "') utt_name = splitline[0] dataline = ' '.join(splitline[1:]) # process the dataline processed,", "the database configuration segment_lengths: A list containing the desired lengths of segments. Possibly", "Args: dataconfs: the database configuration segment_lengths: A list containing the desired lengths of", "of segments. Possibly multiple segment lengths \"\"\" if len(segment_lengths) > 1: print( 'Warning:", "position from the datafile list Args: list_pos: position on the datafile list to", "utterance\"\"\" line = self.datafile_lines[list_pos] # split the name and the data line splitline", "processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get the name of the", "a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines datafile = dataconf['datafiles'] #", "list Args: list_pos: position on the datafile list to read Returns: The processed", "return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get the name of the utterance", "on the datafile list to read Returns: The name of the utterance\"\"\" line", "of looping over all data. It is currently only used in postprocessing. 
\"\"\"", "segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database configuration segment_lengths: A list containing the", "1 processed') self.segment_lengths = segment_lengths self.processors = [] self.start_index_set = [0] self.datafile_lines =", "constructor Args: dataconfs: the database configuration segment_lengths: A list containing the desired lengths", "self.processors[ind-1] break # split the name and the data line splitline = line.strip().split('", "position on the datafile list to read Returns: The name of the utterance\"\"\"", "= configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) #", "# get the datafiles lines datafile = dataconf['datafiles'] # TODO: for the moment", "Possibly multiple segment lengths \"\"\" if len(segment_lengths) > 1: print( 'Warning: Not yet", "open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from the", "self.datafile_lines[list_pos] # split the name and the data line splitline = line.strip().split(' ')", "\\ 'contain the _part sufix and processed returns only 1 processed') self.segment_lengths =", "sense? if datafile[-3:] == '.gz': open_fn = gzip.open else: open_fn = open f", "dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines datafile", "utterance for the given position from the datafile list Args: list_pos: position on", "self.start_index_set = [0] self.datafile_lines = [] for dataconf in dataconfs: # read the", "indexing instead of looping over all data. 
It is currently only used in", "for the given position from the datafile list Args: list_pos: position on the", "= self.datafile_lines[list_pos] # split the name and the data line splitline = line.strip().split('", "import processor_factory import gzip import os class DataReader(object): \"\"\"the data reader class. a", "self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from the datafile list Args: list_pos: position", "' '.join(splitline[1:]) # process the dataline processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name", "the processor config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not", "dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database configuration segment_lengths: A list containing", "'contain the _part sufix and processed returns only 1 processed') self.segment_lengths = segment_lengths", "data reader class. a reader for data. Data is not stored in tensorflow", "the dataline processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name # Currently only returning", "configuration segment_lengths: A list containing the desired lengths of segments. Possibly multiple segment", "if start_index > list_pos: processor = self.processors[ind-1] break # split the name and", "is not stored in tensorflow format as was done in data.py. Data is", "the utterance for the given position from the datafile list Args: list_pos: position", "self.segment_lengths = segment_lengths self.processors = [] self.start_index_set = [0] self.datafile_lines = [] for", "import configparser from nabu.processing.processors import processor_factory import gzip import os class DataReader(object): \"\"\"the", "\"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database configuration segment_lengths:", "reader for data. 
Data is not stored in tensorflow format as was done", "process the dataline processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name # Currently only", "returning 1 processed! processed = processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\"", "the _part sufix and processed returns only 1 processed') self.segment_lengths = segment_lengths self.processors", "exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a", "processor config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist'", "__call__(self, list_pos): \"\"\"read data from the datafile list Args: list_pos: position on the", "f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data", "A list containing the desired lengths of segments. Possibly multiple segment lengths \"\"\"", "datafiles lines datafile = dataconf['datafiles'] # TODO: for the moment expecting only 1", "was done in data.py. Data is returned in numpy format and is accessed", "instead of looping over all data. It is currently only used in postprocessing.", "'.gz': open_fn = gzip.open else: open_fn = open f = open_fn(datafile) datalines =", "open f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read", "\"\"\"the data reader class. a reader for data. 
Data is not stored in", "dataconf['datafiles'] # TODO: for the moment expecting only 1 file, but this also", "list_pos: position on the datafile list to read Returns: The processed data as", "= ' '.join(splitline[1:]) # process the dataline processed, utt_info = processor(dataline) utt_info['utt_name'] =", "= processor(dataline) utt_info['utt_name'] = utt_name # Currently only returning 1 processed! processed =", "datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from the datafile", "' \\ 'contain the _part sufix and processed returns only 1 processed') self.segment_lengths", "segment_lengths self.processors = [] self.start_index_set = [0] self.datafile_lines = [] for dataconf in", "parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths))", "on the datafile list to read Returns: The processed data as a numpy", "# split the name and the data line splitline = line.strip().split(' ') utt_name", "Currently only returning 1 processed! processed = processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self,", "only returning 1 processed! 
processed = processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos):", "reader class for data\"\"\" from six.moves import configparser from nabu.processing.processors import processor_factory import", "name and the data line splitline = line.strip().split(' ') utt_name = splitline[0] dataline", "= segment_lengths self.processors = [] self.start_index_set = [0] self.datafile_lines = [] for dataconf", "\"\"\"DataReader constructor Args: dataconfs: the database configuration segment_lengths: A list containing the desired", "and processed returns only 1 processed') self.segment_lengths = segment_lengths self.processors = [] self.start_index_set", "data. Data is not stored in tensorflow format as was done in data.py.", "start_index in enumerate(self.start_index_set): if start_index > list_pos: processor = self.processors[ind-1] break # split", "lines datafile = dataconf['datafiles'] # TODO: for the moment expecting only 1 file,", "utt_info = processor(dataline) utt_info['utt_name'] = utt_name # Currently only returning 1 processed! processed", "utt_info['utt_name'] = utt_name # Currently only returning 1 processed! processed = processed[self.segment_lengths[0]][0] return", "used in postprocessing. 
\"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the", "read the processor config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does", "data\"\"\" from six.moves import configparser from nabu.processing.processors import processor_factory import gzip import os", "= splitline[0] dataline = ' '.join(splitline[1:]) # process the dataline processed, utt_info =", "Returns: The processed data as a numpy array\"\"\" line = self.datafile_lines[list_pos] for ind,", "datafile list to read Returns: The name of the utterance\"\"\" line = self.datafile_lines[list_pos]", "self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines datafile = dataconf['datafiles'] # TODO: for", "Returns: The name of the utterance\"\"\" line = self.datafile_lines[list_pos] # split the name", "this also makes sense? if datafile[-3:] == '.gz': open_fn = gzip.open else: open_fn", "processor_factory import gzip import os class DataReader(object): \"\"\"the data reader class. a reader", "print( 'Warning: Not yet implemented __call__ correctly for multiple segments. The returned utt_info,", "config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' %", "== '.gz': open_fn = gzip.open else: open_fn = open f = open_fn(datafile) datalines", "returns only 1 processed') self.segment_lengths = segment_lengths self.processors = [] self.start_index_set = [0]", "read Returns: The processed data as a numpy array\"\"\" line = self.datafile_lines[list_pos] for", "only used in postprocessing. 
\"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs:", "get the datafiles lines datafile = dataconf['datafiles'] # TODO: for the moment expecting", "segment_lengths: A list containing the desired lengths of segments. Possibly multiple segment lengths", "nabu.processing.processors import processor_factory import gzip import os class DataReader(object): \"\"\"the data reader class.", "the given position from the datafile list Args: list_pos: position on the datafile", "also makes sense? if datafile[-3:] == '.gz': open_fn = gzip.open else: open_fn =", "= [] for dataconf in dataconfs: # read the processor config proc_cfg_file =", "The name of the utterance\"\"\" line = self.datafile_lines[list_pos] # split the name and", "for ind, start_index in enumerate(self.start_index_set): if start_index > list_pos: processor = self.processors[ind-1] break", "start_index > list_pos: processor = self.processors[ind-1] break # split the name and the", "the utterance\"\"\" line = self.datafile_lines[list_pos] # split the name and the data line", "= f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from the datafile list", "processed data as a numpy array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index in", "> 1: print( 'Warning: Not yet implemented __call__ correctly for multiple segments. The", "lengths \"\"\" if len(segment_lengths) > 1: print( 'Warning: Not yet implemented __call__ correctly", "is returned in numpy format and is accessed by indexing instead of looping", "self.segment_lengths)) # get the datafiles lines datafile = dataconf['datafiles'] # TODO: for the", "Args: list_pos: position on the datafile list to read Returns: The name of", "yet implemented __call__ correctly for multiple segments. 
The returned utt_info, does not '", "get the name of the utterance for the given position from the datafile", "the datafiles lines datafile = dataconf['datafiles'] # TODO: for the moment expecting only", "reader class. a reader for data. Data is not stored in tensorflow format", "len(segment_lengths) > 1: print( 'Warning: Not yet implemented __call__ correctly for multiple segments.", "desired lengths of segments. Possibly multiple segment lengths \"\"\" if len(segment_lengths) > 1:", "list to read Returns: The name of the utterance\"\"\" line = self.datafile_lines[list_pos] #", "proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' % proc_cfg_file)", "from the datafile list Args: list_pos: position on the datafile list to read", "for data\"\"\" from six.moves import configparser from nabu.processing.processors import processor_factory import gzip import", "_part sufix and processed returns only 1 processed') self.segment_lengths = segment_lengths self.processors =", "= utt_name # Currently only returning 1 processed! processed = processed[self.segment_lengths[0]][0] return processed,", "if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser()", "does not ' \\ 'contain the _part sufix and processed returns only 1", "__init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database configuration segment_lengths: A list", "split the name and the data line splitline = line.strip().split(' ') utt_name =", "data line splitline = line.strip().split(' ') utt_name = splitline[0] dataline = ' '.join(splitline[1:])", "in enumerate(self.start_index_set): if start_index > list_pos: processor = self.processors[ind-1] break # split the", "the desired lengths of segments. 
Possibly multiple segment lengths \"\"\" if len(segment_lengths) >", "get_name_for_pos(self, list_pos): \"\"\" get the name of the utterance for the given position", "dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg =", "not stored in tensorflow format as was done in data.py. Data is returned", "# Currently only returning 1 processed! processed = processed[self.segment_lengths[0]][0] return processed, utt_info def", "and is accessed by indexing instead of looping over all data. It is", "list_pos: processor = self.processors[ind-1] break # split the name and the data line", "correctly for multiple segments. The returned utt_info, does not ' \\ 'contain the", "\"\"\"@file data_reader.py contains a reader class for data\"\"\" from six.moves import configparser from", "else: open_fn = open f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def", "datafile list to read Returns: The processed data as a numpy array\"\"\" line", "# process the dataline processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name # Currently", "dataconfs: # read the processor config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise", "segment lengths \"\"\" if len(segment_lengths) > 1: print( 'Warning: Not yet implemented __call__", "accessed by indexing instead of looping over all data. 
It is currently only", "name and the data line splitline = line.strip().split(' ') utt_name = splitline[0] return", "self.datafile_lines = [] for dataconf in dataconfs: # read the processor config proc_cfg_file", "of the utterance for the given position from the datafile list Args: list_pos:", "= open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from", "DataReader(object): \"\"\"the data reader class. a reader for data. Data is not stored", "enumerate(self.start_index_set): if start_index > list_pos: processor = self.processors[ind-1] break # split the name", "in numpy format and is accessed by indexing instead of looping over all", "Data is returned in numpy format and is accessed by indexing instead of", "not os.path.isfile(proc_cfg_file): raise BaseException('%s does not exist' % proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file)", "processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name # Currently only returning 1 processed!", "processed') self.segment_lengths = segment_lengths self.processors = [] self.start_index_set = [0] self.datafile_lines = []", "utt_info, does not ' \\ 'contain the _part sufix and processed returns only", "from six.moves import configparser from nabu.processing.processors import processor_factory import gzip import os class", "f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from the datafile list Args:", "the data line splitline = line.strip().split(' ') utt_name = splitline[0] dataline = '", "\"\"\" get the name of the utterance for the given position from the", "returned utt_info, does not ' \\ 'contain the _part sufix and processed returns", "self.processors = [] self.start_index_set = [0] self.datafile_lines 
= [] for dataconf in dataconfs:", "list_pos: position on the datafile list to read Returns: The name of the", "= self.processors[ind-1] break # split the name and the data line splitline =", "The returned utt_info, does not ' \\ 'contain the _part sufix and processed", "# read the processor config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file): raise BaseException('%s", "name of the utterance\"\"\" line = self.datafile_lines[list_pos] # split the name and the", "lengths of segments. Possibly multiple segment lengths \"\"\" if len(segment_lengths) > 1: print(", "moment expecting only 1 file, but this also makes sense? if datafile[-3:] ==", "= dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles lines", "in data.py. Data is returned in numpy format and is accessed by indexing", "to read Returns: The name of the utterance\"\"\" line = self.datafile_lines[list_pos] # split", "data. It is currently only used in postprocessing. \"\"\" def __init__(self, dataconfs, segment_lengths=['full']):", "% proc_cfg_file) parsed_proc_cfg = configparser.ConfigParser() parsed_proc_cfg.read(proc_cfg_file) proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor", "the datafile list to read Returns: The processed data as a numpy array\"\"\"", "for multiple segments. The returned utt_info, does not ' \\ 'contain the _part", "returned in numpy format and is accessed by indexing instead of looping over", "processor(dataline) utt_info['utt_name'] = utt_name # Currently only returning 1 processed! processed = processed[self.segment_lengths[0]][0]", "given position from the datafile list Args: list_pos: position on the datafile list", "multiple segment lengths \"\"\" if len(segment_lengths) > 1: print( 'Warning: Not yet implemented", "stored in tensorflow format as was done in data.py. 
Data is returned in", "in dataconfs: # read the processor config proc_cfg_file = dataconf['processor_config'] if not os.path.isfile(proc_cfg_file):", "The processed data as a numpy array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index", "for the moment expecting only 1 file, but this also makes sense? if", "configparser from nabu.processing.processors import processor_factory import gzip import os class DataReader(object): \"\"\"the data", "a reader class for data\"\"\" from six.moves import configparser from nabu.processing.processors import processor_factory", "'.join(splitline[1:]) # process the dataline processed, utt_info = processor(dataline) utt_info['utt_name'] = utt_name #", "import os class DataReader(object): \"\"\"the data reader class. a reader for data. Data", "datafile[-3:] == '.gz': open_fn = gzip.open else: open_fn = open f = open_fn(datafile)", "is currently only used in postprocessing. \"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor", "for data. Data is not stored in tensorflow format as was done in", "= line.strip().split(' ') utt_name = splitline[0] dataline = ' '.join(splitline[1:]) # process the", "the datafile list to read Returns: The name of the utterance\"\"\" line =", "implemented __call__ correctly for multiple segments. The returned utt_info, does not ' \\", "os class DataReader(object): \"\"\"the data reader class. a reader for data. Data is", "data from the datafile list Args: list_pos: position on the datafile list to", "utt_name # Currently only returning 1 processed! processed = processed[self.segment_lengths[0]][0] return processed, utt_info", "but this also makes sense? if datafile[-3:] == '.gz': open_fn = gzip.open else:", "over all data. It is currently only used in postprocessing. \"\"\" def __init__(self,", "It is currently only used in postprocessing. 
\"\"\" def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader", "datafile list Args: list_pos: position on the datafile list to read Returns: The", "list to read Returns: The processed data as a numpy array\"\"\" line =", "numpy format and is accessed by indexing instead of looping over all data.", "processed! processed = processed[self.segment_lengths[0]][0] return processed, utt_info def get_name_for_pos(self, list_pos): \"\"\" get the", "= self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if start_index > list_pos: processor =", "six.moves import configparser from nabu.processing.processors import processor_factory import gzip import os class DataReader(object):", "tensorflow format as was done in data.py. Data is returned in numpy format", "def get_name_for_pos(self, list_pos): \"\"\" get the name of the utterance for the given", "1 file, but this also makes sense? if datafile[-3:] == '.gz': open_fn =", "self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos): \"\"\"read data from the datafile list Args: list_pos:", "line.strip().split(' ') utt_name = splitline[0] dataline = ' '.join(splitline[1:]) # process the dataline", "gzip.open else: open_fn = open f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines)", "data_reader.py contains a reader class for data\"\"\" from six.moves import configparser from nabu.processing.processors", "def __init__(self, dataconfs, segment_lengths=['full']): \"\"\"DataReader constructor Args: dataconfs: the database configuration segment_lengths: A", "in tensorflow format as was done in data.py. Data is returned in numpy", "class. a reader for data. 
Data is not stored in tensorflow format as", "utt_info def get_name_for_pos(self, list_pos): \"\"\" get the name of the utterance for the", "Data is not stored in tensorflow format as was done in data.py. Data", "proc_cfg = dict(parsed_proc_cfg.items('processor')) # create a processor self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths)) # get the datafiles", "array\"\"\" line = self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if start_index > list_pos:", "= open f = open_fn(datafile) datalines = f.readlines() self.start_index_set.append(self.start_index_set[-1]+len(datalines)) self.datafile_lines.extend(datalines) def __call__(self, list_pos):", "and the data line splitline = line.strip().split(' ') utt_name = splitline[0] dataline =", "line = self.datafile_lines[list_pos] for ind, start_index in enumerate(self.start_index_set): if start_index > list_pos: processor" ]
[ "ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should have an ISBN.", "should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should", "'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn'))", "name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title should have a name.", "have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should have", "\"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title should have a name. \"\"\"", "test_instance_has_isbn(self): \"\"\" Each Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self):", "\"\"\" Each Title should have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\"", "test_class_has_name(self): \"\"\" The Title model should have a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def", "TestCase from library.models.title import Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls", "from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self): \"\"\"", "library.models.title import Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls = Title", "\"\"\" Each Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\"", "self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title should have a name. 
\"\"\" self.assertTrue(hasattr(self.instance,", "TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self): \"\"\" The Title model should have", "= Title def test_class_has_isbn(self): \"\"\" The Title model should have an ISBN. \"\"\"", "should have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model", "self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance,", "have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title model should", "def test_class_has_isbn(self): \"\"\" The Title model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn'))", "name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model should have a", "import BaseTest class TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self): \"\"\" The Title", "\"\"\" The Title model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self):", "Title model should have a constructor which takes an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'title_from_isbn'))", "should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title model", "import Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls = Title def", "BaseTest class TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self): \"\"\" The Title model", "test_instance_has_name(self): \"\"\" Each Title should have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self):", "an ISBN. 
\"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should have an", "self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title model should have a name. \"\"\"", "test_class_has_title_from_isbn(self): \"\"\" The Title model should have a constructor which takes an ISBN.", "Each Title should have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The", "\"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title should have an ISBN. \"\"\"", "test_class_has_isbn(self): \"\"\" The Title model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def", "self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model should have a constructor which", "def test_instance_has_isbn(self): \"\"\" Each Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def", "from unittest import TestCase from library.models.title import Title from tests.unit.models.test_base import BaseTest class", "import TestCase from library.models.title import Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase):", "'isbn')) def test_class_has_name(self): \"\"\" The Title model should have a name. \"\"\" self.assertTrue(hasattr(self.cls,", "The Title model should have a constructor which takes an ISBN. \"\"\" self.assertTrue(hasattr(self.cls,", "model should have a name. 
\"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title", "TestCase): cls = Title def test_class_has_isbn(self): \"\"\" The Title model should have an", "tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self): \"\"\" The", "\"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model should have a constructor", "\"\"\" The Title model should have a constructor which takes an ISBN. \"\"\"", "have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model should", "a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title should have a", "def test_instance_has_name(self): \"\"\" Each Title should have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def", "model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each Title", "a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model should have", "The Title model should have a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\"", "def test_class_has_name(self): \"\"\" The Title model should have a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name'))", "from library.models.title import Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls =", "'name')) def test_instance_has_name(self): \"\"\" Each Title should have a name. 
\"\"\" self.assertTrue(hasattr(self.instance, 'name'))", "unittest import TestCase from library.models.title import Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest,", "def test_class_has_title_from_isbn(self): \"\"\" The Title model should have a constructor which takes an", "Title should have a name. \"\"\" self.assertTrue(hasattr(self.instance, 'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title", "Title from tests.unit.models.test_base import BaseTest class TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self):", "\"\"\" The Title model should have a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self):", "Title model should have a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each", "cls = Title def test_class_has_isbn(self): \"\"\" The Title model should have an ISBN.", "\"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title model should have a name.", "The Title model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\"", "'name')) def test_class_has_title_from_isbn(self): \"\"\" The Title model should have a constructor which takes", "Title def test_class_has_isbn(self): \"\"\" The Title model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls,", "Title model should have an ISBN. \"\"\" self.assertTrue(hasattr(self.cls, 'isbn')) def test_instance_has_isbn(self): \"\"\" Each", "an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title model should have", "Each Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The", "should have a name. \"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title should", "have a name. 
\"\"\" self.assertTrue(hasattr(self.cls, 'name')) def test_instance_has_name(self): \"\"\" Each Title should have", "Title should have an ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title", "ISBN. \"\"\" self.assertTrue(hasattr(self.instance, 'isbn')) def test_class_has_name(self): \"\"\" The Title model should have a", "class TestTitle(BaseTest, TestCase): cls = Title def test_class_has_isbn(self): \"\"\" The Title model should" ]
[ "x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def", "fitable instance. :param loader_config: Optional dict. Contains data which can be used to", ":return: Loader object and the data returned by that Loader's get_data method. \"\"\"", "can be used to create a new fitable instance. :param loader_config: Optional dict.", "= None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using ingredients.get_loader and self.exp_config['loader_config']", "fit method. :param fitable_config: Optional dict. Contains data which can be used to", "Run, fitable: Model, test_data: tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model object. :param", "= None) -> Model: \"\"\" Defines and compiles a fitable (keras.model or keras_tuner.tuner)", "model = builder.get_model() model.compile(**compile_kwargs) return model def _fit( self, run: Run, fitable: Model,", "calls either get_builder, or get_hyper_factory, depending on which type of fitable is beind", "= fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\") return loss def _save_fitable(self, run:", "Loader's get_data method. \"\"\" config = config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if", "fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"])", ":param run: sacred.Run object. See sacred documentation for details on utility. 
:param fitable:", "or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1])", "import Run import tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models", "dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with", "See sacred documentation for more details on utility. :param fitable: Optional tensorflow.keras.Model or", "and test data in the form (train, val, test), where train is the", "that Loader's get_data method. \"\"\" config = config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config)", "fitable_config: dict = None, loader_config: dict = None, ): \"\"\" Private method containing", "Scalar test_loss value. \"\"\" if test_data is None: return 0.0 x_test, y_test =", "fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self, config: dict =", "if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def", "or keras_tuner.tuner) which implements a 'fit' method. 
This method calls either get_builder, or", "fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder", "loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model", "fitable: tensorflow.keras.Model object. :param data: tuple. train, validation, and test data in the", "loader: DataLoader, fitable_config: dict = None) -> Model: \"\"\" Defines and compiles a", "form (train, val, test), where train is the tuple (x_train, y_train). :param callbacks:", "see sacred documentation for more details on utility. :param fitable: tensorflow.keras.Model object. \"\"\"", "keras_tuner.tuner) which implements a 'fit' method. This method calls either get_builder, or get_hyper_factory,", "fitable: Model, data: tuple, callbacks: list = None, ) -> Model: \"\"\" :param", "seed: int, fitable: Model = None, fitable_config: dict = None, loader_config: dict =", ":param run: sacred.Run object. see sacred documentation for more details on utility. 
:param", "builder.get_model() model.compile(**compile_kwargs) return model def _fit( self, run: Run, fitable: Model, data: tuple,", "= self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str):", "fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent / name self.exp_config[\"run_config\"][\"model_path\"] =", "return fitable def _test_fitable(self, run: Run, fitable: Model, test_data: tuple) -> float: \"\"\"", "data returned by that Loader's get_data method. \"\"\" config = config or self.exp_config[\"loader_config\"]", "split results: {loss}\") return loss def _save_fitable(self, run: Run, fitable: Model): \"\"\" :param", "self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self, config:", "0.0 x_test, y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results:", "tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path)", "results: {loss}\") return loss def _save_fitable(self, run: Run, fitable: Model): \"\"\" :param run:", "object. :param test_data: tuple. contains (x_test, y_test). :return: float. Scalar test_loss value. \"\"\"", "be used to create a new DataLoader instance. 
\"\"\" loader, data = self._load_data(loader_config)", "data def _load_fitable(self, loader: DataLoader, fitable_config: dict = None) -> Model: \"\"\" Defines", "from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from .job import Job", "data which can be used to create a new DataLoader instance. \"\"\" loader,", "= dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config =", "get_hyper_factory, depending on which type of fitable is beind loaded. :return: Model or", "is a default workflow for a basic keras/kerastuner type job. :param run: sacred.Run", "= fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, )", "specific data_loader class. :return: Loader object and the data returned by that Loader's", ":return: float. Scalar test_loss value. \"\"\" if test_data is None: return 0.0 x_test,", "model = builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return model def _fit(", "used to create a new DataLoader instance. \"\"\" loader, data = self._load_data(loader_config) fitable", "as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from .job", "a default workflow for a basic keras/kerastuner type job. :param run: sacred.Run object.", "beind loaded. :return: Model or Tuner object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"]", "return fitable def _load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains", "None: return 0.0 x_test, y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test", "float: \"\"\" :param fitable: tensorflow.keras.Model object. :param test_data: tuple. 
contains (x_test, y_test). :return:", "test_data: tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model object. :param test_data: tuple. contains", "None, ) -> Model: \"\"\" :param run: sacred.Run object. See sacred documentation for", "fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\") return loss def _save_fitable(self, run: Run,", "epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self, run:", "loader using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict. config passed to get_data_loader", "default workflow for a basic keras/kerastuner type job. :param run: sacred.Run object. See", "-> Model: \"\"\" Defines and compiles a fitable (keras.model or keras_tuner.tuner) which implements", ":param callbacks: Optional list. List of tensorflow.keras.Callback objects to pass to fitable.fit method.", "fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run,", "the tuple (x_train, y_train). :param callbacks: Optional list. List of tensorflow.keras.Callback objects to", ":param loader_config: Optional dict. Contains data which can be used to create a", "create a new fitable instance. :param loader_config: Optional dict. 
Contains data which can", "\"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu,", "= dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return", "containing the actual work completed by the job. Implemented is a default workflow", "Model from .job import Job from ..ingredients import ( get_data_loader, get_builder, ) from", "metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model()", "test), where train is the tuple (x_train, y_train). :param callbacks: Optional list. List", "optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model =", "fitable def _test_fitable(self, run: Run, fitable: Model, test_data: tuple) -> float: \"\"\" :param", "if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] )", ":param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains a fit method.", "= None, ) -> Model: \"\"\" :param run: sacred.Run object. See sacred documentation", "Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains a fit method. 
:param fitable_config:", "tensorflow.keras.models import Model from .job import Job from ..ingredients import ( get_data_loader, get_builder,", "-> float: \"\"\" :param fitable: tensorflow.keras.Model object. :param test_data: tuple. contains (x_test, y_test).", "on utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains a", "on utility. :param fitable: tensorflow.keras.Model object. :param data: tuple. train, validation, and test", "Model): \"\"\" :param run: sacred.Run object. see sacred documentation for more details on", "model def _fit( self, run: Run, fitable: Model, data: tuple, callbacks: list =", ":param test_data: tuple. contains (x_test, y_test). :return: float. Scalar test_loss value. \"\"\" if", "loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader: DataLoader, fitable_config:", "callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory,", "class KerasJob(Job): def _main( self, run: Run, seed: int, fitable: Model = None,", "method. :param fitable_config: Optional dict. Contains data which can be used to create", "from sacred.run import Run import tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard", "Run import tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import", "object. See sacred documentation for details on utility. :param fitable: tensorflow.keras.Model object. :param", "int, fitable: Model = None, fitable_config: dict = None, loader_config: dict = None,", "model.compile(**compile_kwargs) return model def _fit( self, run: Run, fitable: Model, data: tuple, callbacks:", "data_loader class. 
:return: Loader object and the data returned by that Loader's get_data", "run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if", "data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader: DataLoader, fitable_config: dict =", "typing import Tuple from sacred.run import Run import tensorflow as tf from tensorflow.keras.callbacks", "be used to create a new fitable instance. :param loader_config: Optional dict. Contains", "details on utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains", "a loader using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict. config passed to", "self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _ = data callbacks = callbacks or", "= self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data)", "0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent / name", "object. 
\"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def", "_new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent / name self.exp_config[\"run_config\"][\"model_path\"] = model_path return model_path", "loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader: DataLoader, fitable_config: dict = None) ->", "def _test_fitable(self, run: Run, fitable: Model, test_data: tuple) -> float: \"\"\" :param fitable:", "self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy", "KerasJob(Job): def _main( self, run: Run, seed: int, fitable: Model = None, fitable_config:", "get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], )", "loader, data = self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run,", "\"\"\" Obtains a loader using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict. config", "run: sacred.Run object. See sacred documentation for more details on utility. :param fitable:", "fitable) return fitable def _load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]: \"\"\"", "y_train). :param callbacks: Optional list. List of tensorflow.keras.Callback objects to pass to fitable.fit", "workflow for a basic keras/kerastuner type job. :param run: sacred.Run object. 
See sacred", "self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs", "run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs)", "{loss}\") return loss def _save_fitable(self, run: Run, fitable: Model): \"\"\" :param run: sacred.Run", "Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional", "import DataLoader class KerasJob(Job): def _main( self, run: Run, seed: int, fitable: Model", "can be used to create a new DataLoader instance. \"\"\" loader, data =", "sacred documentation for more details on utility. :param fitable: tensorflow.keras.Model object. \"\"\" path", "def _fit( self, run: Run, fitable: Model, data: tuple, callbacks: list = None,", "get_builder, ) from ..loaders import DataLoader class KerasJob(Job): def _main( self, run: Run,", "val, test), where train is the tuple (x_train, y_train). :param callbacks: Optional list.", "data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader:", "= tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs)", "documentation for details on utility. :param fitable: tensorflow.keras.Model object. :param data: tuple. train,", "DataLoader instance. 
\"\"\" loader, data = self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config)", "DataLoader class KerasJob(Job): def _main( self, run: Run, seed: int, fitable: Model =", "ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from .job import Job from ..ingredients import", "by that Loader's get_data method. \"\"\" config = config or self.exp_config[\"loader_config\"] loader =", "object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _ = data", "job. :param run: sacred.Run object. See sacred documentation for more details on utility.", "(x_train, y_train), val, _ = data callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]:", ":param config: Optional dict. config passed to get_data_loader to obtain specific data_loader class.", "passed to get_data_loader to obtain specific data_loader class. :return: Loader object and the", "a basic keras/kerastuner type job. :param run: sacred.Run object. See sacred documentation for", "y=y_test, verbose=0) print(f\"Test split results: {loss}\") return loss def _save_fitable(self, run: Run, fitable:", "self.exp_config['loader_config'] :param config: Optional dict. config passed to get_data_loader to obtain specific data_loader", "sacred documentation for details on utility. :param fitable: tensorflow.keras.Model object. :param data: tuple.", "callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ),", "which contains a fit method. :param fitable_config: Optional dict. Contains data which can", "list = None, ) -> Model: \"\"\" :param run: sacred.Run object. See sacred", "list. List of tensorflow.keras.Callback objects to pass to fitable.fit method. 
:return: tensorflow.keras.Model object.", "self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self, config: dict = None) -> Tuple[DataLoader,", "\"\"\" Private method containing the actual work completed by the job. Implemented is", "callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs =", "documentation for more details on utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object.", "tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model object. :param test_data: tuple. contains (x_test,", "a new DataLoader instance. \"\"\" loader, data = self._load_data(loader_config) fitable = fitable or", "test data in the form (train, val, test), where train is the tuple", "Model or Tuner object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf = dict(", "data: tuple. train, validation, and test data in the form (train, val, test),", "fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable,", "import Job from ..ingredients import ( get_data_loader, get_builder, ) from ..loaders import DataLoader", "validation, and test data in the form (train, val, test), where train is", "config = config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"])", "new DataLoader instance. \"\"\" loader, data = self._load_data(loader_config) fitable = fitable or self._load_fitable(loader,", "= loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader: DataLoader,", "(x_train, y_train). :param callbacks: Optional list. 
List of tensorflow.keras.Callback objects to pass to", "documentation for more details on utility. :param fitable: tensorflow.keras.Model object. \"\"\" path =", "the form (train, val, test), where train is the tuple (x_train, y_train). :param", "= builder.get_model() model.compile(**compile_kwargs) return model def _fit( self, run: Run, fitable: Model, data:", "method containing the actual work completed by the job. Implemented is a default", "tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _ =", "..loaders import DataLoader class KerasJob(Job): def _main( self, run: Run, seed: int, fitable:", "Run, seed: int, fitable: Model = None, fitable_config: dict = None, loader_config: dict", "Loader object and the data returned by that Loader's get_data method. \"\"\" config", "= dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy()", "self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf)", "self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self,", "Private method containing the actual work completed by the job. Implemented is a", "import Tuple from sacred.run import Run import tensorflow as tf from tensorflow.keras.callbacks import", "sacred.Run object. See sacred documentation for details on utility. :param fitable: tensorflow.keras.Model object.", "get_data_loader to obtain specific data_loader class. 
:return: Loader object and the data returned", "pathlib import Path from typing import Tuple from sacred.run import Run import tensorflow", "Model-like which contains a fit method. :param fitable_config: Optional dict. Contains data which", "= self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _ = data callbacks = callbacks", "to pass to fitable.fit method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] /", "or kerastuner.Tuner object. Model-like which contains a fit method. :param fitable_config: Optional dict.", "of fitable is beind loaded. :return: Model or Tuner object. \"\"\" fitable_config =", "return model def _fit( self, run: Run, fitable: Model, data: tuple, callbacks: list", "on utility. :param fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] >", "is beind loaded. :return: Model or Tuner object. \"\"\" fitable_config = fitable_config or", "= callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, )", "] ) kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"],", "TensorBoard from tensorflow.keras.models import Model from .job import Job from ..ingredients import (", "tuple. train, validation, and test data in the form (train, val, test), where", "\"\"\" :param run: sacred.Run object. see sacred documentation for more details on utility.", "): \"\"\" Private method containing the actual work completed by the job. 
Implemented", ":param fitable: tensorflow.keras.Model object. :param data: tuple. train, validation, and test data in", "run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model =", "import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from .job import Job from ..ingredients", "= data callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict(", "strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model()", "or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data =", "run: Run, seed: int, fitable: Model = None, fitable_config: dict = None, loader_config:", "for a basic keras/kerastuner type job. :param run: sacred.Run object. See sacred documentation", "utility. :param fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0:", ".job import Job from ..ingredients import ( get_data_loader, get_builder, ) from ..loaders import", "to fitable.fit method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train,", "= None, loader_config: dict = None, ): \"\"\" Private method containing the actual", "fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains a fit method. 
:param", "run: Run, fitable: Model, data: tuple, callbacks: list = None, ) -> Model:", "else: model = builder.get_model() model.compile(**compile_kwargs) return model def _fit( self, run: Run, fitable:", "sacred documentation for more details on utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner", "the job. Implemented is a default workflow for a basic keras/kerastuner type job.", "completed by the job. Implemented is a default workflow for a basic keras/kerastuner", "else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader: DataLoader, fitable_config: dict", "from ..ingredients import ( get_data_loader, get_builder, ) from ..loaders import DataLoader class KerasJob(Job):", "using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict. config passed to get_data_loader to", "= self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]:", "by the job. Implemented is a default workflow for a basic keras/kerastuner type", "sacred.run import Run import tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from", "self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent", "fitable is beind loaded. :return: Model or Tuner object. 
\"\"\" fitable_config = fitable_config", "self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path", "num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict(", "\"logs\" (x_train, y_train), val, _ = data callbacks = callbacks or [] if", "**dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train, y=y_train,", "verbose=0) print(f\"Test split results: {loss}\") return loss def _save_fitable(self, run: Run, fitable: Model):", "test_loss value. \"\"\" if test_data is None: return 0.0 x_test, y_test = test_data", "Job from ..ingredients import ( get_data_loader, get_builder, ) from ..loaders import DataLoader class", "run: sacred.Run object. See sacred documentation for details on utility. :param fitable: tensorflow.keras.Model", "from tensorflow.keras.models import Model from .job import Job from ..ingredients import ( get_data_loader,", "class. :return: Loader object and the data returned by that Loader's get_data method.", "(train, val, test), where train is the tuple (x_train, y_train). :param callbacks: Optional", "Optional list. List of tensorflow.keras.Callback objects to pass to fitable.fit method. :return: tensorflow.keras.Model", "a new fitable instance. :param loader_config: Optional dict. 
Contains data which can be", "path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name:", "Model: \"\"\" Defines and compiles a fitable (keras.model or keras_tuner.tuner) which implements a", ":param run: sacred.Run object. See sacred documentation for more details on utility. :param", "loaded. :return: Model or Tuner object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf", "fitable.fit method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train),", "details on utility. :param fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"]", "_load_fitable(self, loader: DataLoader, fitable_config: dict = None) -> Model: \"\"\" Defines and compiles", "fitable: Model, test_data: tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model object. :param test_data:", "ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks,", "the data returned by that Loader's get_data method. 
\"\"\" config = config or", "dict = None, ): \"\"\" Private method containing the actual work completed by", "if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path =", "= get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader,", "fitable (keras.model or keras_tuner.tuner) which implements a 'fit' method. This method calls either", "is None: return 0.0 x_test, y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0)", "data callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"],", "object and the data returned by that Loader's get_data method. \"\"\" config =", "tuple. contains (x_test, y_test). :return: float. Scalar test_loss value. \"\"\" if test_data is", "None, fitable_config: dict = None, loader_config: dict = None, ): \"\"\" Private method", "and compiles a fitable (keras.model or keras_tuner.tuner) which implements a 'fit' method. This", "_main( self, run: Run, seed: int, fitable: Model = None, fitable_config: dict =", "method. \"\"\" config = config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data", "tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from .job import", "loader_config: Optional dict. 
Contains data which can be used to create a new", "dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable", "object. See sacred documentation for more details on utility. :param fitable: Optional tensorflow.keras.Model", ") -> Model: \"\"\" :param run: sacred.Run object. See sacred documentation for details", "loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return", "instance. :param loader_config: Optional dict. Contains data which can be used to create", "callbacks: Optional list. List of tensorflow.keras.Callback objects to pass to fitable.fit method. :return:", "( get_data_loader, get_builder, ) from ..loaders import DataLoader class KerasJob(Job): def _main( self,", "dict = None, loader_config: dict = None, ): \"\"\" Private method containing the", "where train is the tuple (x_train, y_train). :param callbacks: Optional list. List of", "data in the form (train, val, test), where train is the tuple (x_train,", "fitable_config: Optional dict. Contains data which can be used to create a new", "or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]),", "tensorflow.keras.Callback objects to pass to fitable.fit method. :return: tensorflow.keras.Model object. 
\"\"\" tensorboard_directory =", "builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"],", "/ \"logs\" (x_train, y_train), val, _ = data callbacks = callbacks or []", "TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train,", "run: Run, fitable: Model, test_data: tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model object.", "from ..loaders import DataLoader class KerasJob(Job): def _main( self, run: Run, seed: int,", "fitable_config) fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]:", "if test_data is None: return 0.0 x_test, y_test = test_data loss = fitable.evaluate(x=x_test,", "(keras.model or keras_tuner.tuner) which implements a 'fit' method. This method calls either get_builder,", "job. Implemented is a default workflow for a basic keras/kerastuner type job. :param", "callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run, fitable: Model, test_data:", "def _load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader", "= config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else:", "method calls either get_builder, or get_hyper_factory, depending on which type of fitable is", "object. :param data: tuple. train, validation, and test data in the form (train,", "create a new DataLoader instance. 
\"\"\" loader, data = self._load_data(loader_config) fitable = fitable", "pass to fitable.fit method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\"", "a fit method. :param fitable_config: Optional dict. Contains data which can be used", "data which can be used to create a new fitable instance. :param loader_config:", "y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\") return", "Obtains a loader using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict. config passed", "Tuner object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z,", "actual work completed by the job. Implemented is a default workflow for a", "which implements a 'fit' method. This method calls either get_builder, or get_hyper_factory, depending", "loss def _save_fitable(self, run: Run, fitable: Model): \"\"\" :param run: sacred.Run object. see", "or get_hyper_factory, depending on which type of fitable is beind loaded. :return: Model", "class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run, fitable: Model,", "def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent / name self.exp_config[\"run_config\"][\"model_path\"] = model_path return", "Optional dict. Contains data which can be used to create a new fitable", "instance. \"\"\" loader, data = self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config) fitable", "fitable_config: dict = None) -> Model: \"\"\" Defines and compiles a fitable (keras.model", "dict. 
Contains data which can be used to create a new DataLoader instance.", "List of tensorflow.keras.Callback objects to pass to fitable.fit method. :return: tensorflow.keras.Model object. \"\"\"", "of tensorflow.keras.Callback objects to pass to fitable.fit method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory", "train is the tuple (x_train, y_train). :param callbacks: Optional list. List of tensorflow.keras.Callback", "_test_fitable(self, run: Run, fitable: Model, test_data: tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model", "\"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self,", "Model, data: tuple, callbacks: list = None, ) -> Model: \"\"\" :param run:", "dict. config passed to get_data_loader to obtain specific data_loader class. :return: Loader object", "loader_config: dict = None, ): \"\"\" Private method containing the actual work completed", "from pathlib import Path from typing import Tuple from sacred.run import Run import", "if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model", ") kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], )", "and self.exp_config['loader_config'] :param config: Optional dict. 
config passed to get_data_loader to obtain specific", "None, loader_config: dict = None, ): \"\"\" Private method containing the actual work", "= loader.load_data(**config[\"load_kwargs\"]) return loader, data def _load_fitable(self, loader: DataLoader, fitable_config: dict = None)", "fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable", "config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data", "object. Model-like which contains a fit method. :param fitable_config: Optional dict. Contains data", "conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config", "\"\"\" config = config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data =", "type of fitable is beind loaded. :return: Model or Tuner object. \"\"\" fitable_config", ":param fitable_config: Optional dict. Contains data which can be used to create a", "sigma=loader.sigma, ) builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"],", "loader, data def _load_fitable(self, loader: DataLoader, fitable_config: dict = None) -> Model: \"\"\"", "= self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable)", "This method calls either get_builder, or get_hyper_factory, depending on which type of fitable", "Contains data which can be used to create a new fitable instance. :param", "for details on utility. 
:param fitable: tensorflow.keras.Model object. :param data: tuple. train, validation,", "None) -> Model: \"\"\" Defines and compiles a fitable (keras.model or keras_tuner.tuner) which", "fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma,", "test_data: tuple. contains (x_test, y_test). :return: float. Scalar test_loss value. \"\"\" if test_data", "or Tuner object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config,", "import Path from typing import Tuple from sacred.run import Run import tensorflow as", "x_test, y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\")", "loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\") return loss def _save_fitable(self,", "log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"],", "= get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"],", "fitable: Model): \"\"\" :param run: sacred.Run object. see sacred documentation for more details", "to create a new DataLoader instance. \"\"\" loader, data = self._load_data(loader_config) fitable =", "Tuple from sacred.run import Run import tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau,", "to create a new fitable instance. :param loader_config: Optional dict. 
Contains data which", "import ( get_data_loader, get_builder, ) from ..loaders import DataLoader class KerasJob(Job): def _main(", ") if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else:", "y_train), val, _ = data callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend(", "self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"])", "\"\"\" :param fitable: tensorflow.keras.Model object. :param test_data: tuple. contains (x_test, y_test). :return: float.", "if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self,", "data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self, config: dict = None)", "[] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ]", ":param data: tuple. train, validation, and test data in the form (train, val,", "self, run: Run, fitable: Model, data: tuple, callbacks: list = None, ) ->", "tuple (x_train, y_train). :param callbacks: Optional list. List of tensorflow.keras.Callback objects to pass", "-> Model: \"\"\" :param run: sacred.Run object. 
See sacred documentation for details on", "data = self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable,", "self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if", "get_data_loader, get_builder, ) from ..loaders import DataLoader class KerasJob(Job): def _main( self, run:", ") ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val,", ") fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run, fitable: Model, test_data: tuple) ->", ") from ..loaders import DataLoader class KerasJob(Job): def _main( self, run: Run, seed:", "self, run: Run, seed: int, fitable: Model = None, fitable_config: dict = None,", ":param fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if self.exp_config[\"run_config\"][\"save_verbosity\"] > 0: fitable.summary()", "fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run, fitable: Model, test_data: tuple) -> float:", "used to create a new fitable instance. :param loader_config: Optional dict. Contains data", "the actual work completed by the job. 
Implemented is a default workflow for", "model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return model def _fit( self, run: Run,", "tf.distribute.MirroredStrategy() with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return", "Defines and compiles a fitable (keras.model or keras_tuner.tuner) which implements a 'fit' method.", "import tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model", "verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run, fitable: Model, test_data: tuple)", "= test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\") return loss", "in the form (train, val, test), where train is the tuple (x_train, y_train).", "= builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return model def _fit( self,", "or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder =", "import Model from .job import Job from ..ingredients import ( get_data_loader, get_builder, )", "Contains data which can be used to create a new DataLoader instance. \"\"\"", "tensorflow.keras.Model object. :param test_data: tuple. contains (x_test, y_test). :return: float. Scalar test_loss value.", "dict. Contains data which can be used to create a new fitable instance.", "from .job import Job from ..ingredients import ( get_data_loader, get_builder, ) from ..loaders", "Optional dict. Contains data which can be used to create a new DataLoader", "tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains a fit method. 
:param fitable_config: Optional", "fitable def _load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a", "run: Run, fitable: Model): \"\"\" :param run: sacred.Run object. see sacred documentation for", "\"\"\" Defines and compiles a fitable (keras.model or keras_tuner.tuner) which implements a 'fit'", "train, validation, and test data in the form (train, val, test), where train", "\"\"\" if test_data is None: return 0.0 x_test, y_test = test_data loss =", "depending on which type of fitable is beind loaded. :return: Model or Tuner", "'fit' method. This method calls either get_builder, or get_hyper_factory, depending on which type", "[ TensorBoard( **dict( **self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict(", "test_data is None: return 0.0 x_test, y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test,", "\"\"\" loader, data = self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config) fitable =", "Model, test_data: tuple) -> float: \"\"\" :param fitable: tensorflow.keras.Model object. :param test_data: tuple.", "), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"],", "Tuple]: \"\"\" Obtains a loader using ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict.", "sacred.Run object. See sacred documentation for more details on utility. 
:param fitable: Optional", "DataLoader, fitable_config: dict = None) -> Model: \"\"\" Defines and compiles a fitable", "= None, fitable_config: dict = None, loader_config: dict = None, ): \"\"\" Private", "max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs =", "builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return model def _fit( self, run:", "config passed to get_data_loader to obtain specific data_loader class. :return: Loader object and", "data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def", "mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"],", "method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val,", "if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self, config: dict = None) ->", "**fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs", "work completed by the job. Implemented is a default workflow for a basic", "tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _ = data callbacks =", "value. 
\"\"\" if test_data is None: return 0.0 x_test, y_test = test_data loss", "from typing import Tuple from sacred.run import Run import tensorflow as tf from", "dict = None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using ingredients.get_loader and", "Path from typing import Tuple from sacred.run import Run import tensorflow as tf", "a fitable (keras.model or keras_tuner.tuner) which implements a 'fit' method. This method calls", "Model: \"\"\" :param run: sacred.Run object. See sacred documentation for details on utility.", "which can be used to create a new DataLoader instance. \"\"\" loader, data", "objects to pass to fitable.fit method. :return: tensorflow.keras.Model object. \"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"]", "more details on utility. :param fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"] if", "fitable = fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]:", "details on utility. :param fitable: tensorflow.keras.Model object. :param data: tuple. train, validation, and", "..ingredients import ( get_data_loader, get_builder, ) from ..loaders import DataLoader class KerasJob(Job): def", "def _main( self, run: Run, seed: int, fitable: Model = None, fitable_config: dict", "sacred.Run object. see sacred documentation for more details on utility. :param fitable: tensorflow.keras.Model", "new fitable instance. :param loader_config: Optional dict. Contains data which can be used", "returned by that Loader's get_data method. \"\"\" config = config or self.exp_config[\"loader_config\"] loader", "which type of fitable is beind loaded. :return: Model or Tuner object. \"\"\"", "tensorflow.keras.Model object. :param data: tuple. 
train, validation, and test data in the form", "**self.exp_config[\"tb_config\"], log_dir=tensorboard_directory, ) ), ReduceLROnPlateau(**self.exp_config[\"lr_config\"]), ] ) kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"],", "self._load_data(loader_config) fitable = fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data) if", "a 'fit' method. This method calls either get_builder, or get_hyper_factory, depending on which", "_fit( self, run: Run, fitable: Model, data: tuple, callbacks: list = None, )", "more details on utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which", "kerastuner.Tuner object. Model-like which contains a fit method. :param fitable_config: Optional dict. Contains", "\"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _ = data callbacks", "callbacks: list = None, ) -> Model: \"\"\" :param run: sacred.Run object. See", "data: tuple, callbacks: list = None, ) -> Model: \"\"\" :param run: sacred.Run", ":return: Model or Tuner object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf =", "dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points, mu=loader.mu, sigma=loader.sigma, ) builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"]", "batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run,", "basic keras/kerastuner type job. :param run: sacred.Run object. 
See sacred documentation for more", "Run, fitable: Model, data: tuple, callbacks: list = None, ) -> Model: \"\"\"", "y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self,", "self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return fitable def _load_data(self, config: dict", "None, ): \"\"\" Private method containing the actual work completed by the job.", "test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split results: {loss}\") return loss def", "config: Optional dict. config passed to get_data_loader to obtain specific data_loader class. :return:", "None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using ingredients.get_loader and self.exp_config['loader_config'] :param", "(x_test, y_test). :return: float. Scalar test_loss value. \"\"\" if test_data is None: return", "on which type of fitable is beind loaded. 
:return: Model or Tuner object.", "def _load_fitable(self, loader: DataLoader, fitable_config: dict = None) -> Model: \"\"\" Defines and", "strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return model def", "with strategy.scope(): model = builder.get_model() model.compile(**compile_kwargs) else: model = builder.get_model() model.compile(**compile_kwargs) return model", "loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy = tf.distribute.MirroredStrategy() with strategy.scope():", "-> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using ingredients.get_loader and self.exp_config['loader_config'] :param config:", "contains a fit method. :param fitable_config: Optional dict. Contains data which can be", "for more details on utility. :param fitable: tensorflow.keras.Model object. \"\"\" path = self.exp_config[\"run_config\"][\"model_path\"]", "object. see sacred documentation for more details on utility. :param fitable: tensorflow.keras.Model object.", "obtain specific data_loader class. :return: Loader object and the data returned by that", "float. Scalar test_loss value. \"\"\" if test_data is None: return 0.0 x_test, y_test", "fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent / name self.exp_config[\"run_config\"][\"model_path\"]", "get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]: data = loader.few_examples(**config[\"load_kwargs\"]) else: data = loader.load_data(**config[\"load_kwargs\"]) return loader, data", "method. 
This method calls either get_builder, or get_hyper_factory, depending on which type of", "return 0.0 x_test, y_test = test_data loss = fitable.evaluate(x=x_test, y=y_test, verbose=0) print(f\"Test split", "_load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using", "get_data method. \"\"\" config = config or self.exp_config[\"loader_config\"] loader = get_data_loader(**config) if self.exp_config[\"run_config\"][\"select_few\"]:", "utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like which contains a fit", "tuple, callbacks: list = None, ) -> Model: \"\"\" :param run: sacred.Run object.", "validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs) return fitable def _test_fitable(self, run: Run, fitable:", ") builder = get_builder(**conf) run_config = self.exp_config[\"run_config\"] compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"],", "See sacred documentation for details on utility. :param fitable: tensorflow.keras.Model object. :param data:", "type job. :param run: sacred.Run object. See sacred documentation for more details on", "object. \"\"\" fitable_config = fitable_config or self.exp_config[\"builder_config\"] conf = dict( **fitable_config, max_z=loader.max_z, num_points=loader.num_points,", "to get_data_loader to obtain specific data_loader class. :return: Loader object and the data", "self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run, fitable, data[-1]) if self.exp_config[\"run_config\"][\"save_model\"]: self._save_fitable(run, fitable) return", "y_test). :return: float. Scalar test_loss value. \"\"\" if test_data is None: return 0.0", "_save_fitable(self, run: Run, fitable: Model): \"\"\" :param run: sacred.Run object. 
see sacred documentation", "= None, ): \"\"\" Private method containing the actual work completed by the", "Optional dict. config passed to get_data_loader to obtain specific data_loader class. :return: Loader", "tensorflow as tf from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from", "for more details on utility. :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object. Model-like", "ingredients.get_loader and self.exp_config['loader_config'] :param config: Optional dict. config passed to get_data_loader to obtain", "config: dict = None) -> Tuple[DataLoader, Tuple]: \"\"\" Obtains a loader using ingredients.get_loader", "Run, fitable: Model): \"\"\" :param run: sacred.Run object. see sacred documentation for more", "fitable: tensorflow.keras.Model object. :param test_data: tuple. contains (x_test, y_test). :return: float. Scalar test_loss", "and the data returned by that Loader's get_data method. \"\"\" config = config", "which can be used to create a new fitable instance. :param loader_config: Optional", "_ = data callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [ TensorBoard(", "run.add_artifact(path) def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent / name self.exp_config[\"run_config\"][\"model_path\"] = model_path", "= fitable or self._load_fitable(loader, fitable_config) fitable = self._fit(run, fitable, data) if self.exp_config[\"run_config\"][\"test\"]: self._test_fitable(run,", "compiles a fitable (keras.model or keras_tuner.tuner) which implements a 'fit' method. This method", "utility. :param fitable: tensorflow.keras.Model object. :param data: tuple. train, validation, and test data", "either get_builder, or get_hyper_factory, depending on which type of fitable is beind loaded.", ":return: tensorflow.keras.Model object. 
\"\"\" tensorboard_directory = self.exp_config[\"run_config\"][\"root_dir\"] / \"logs\" (x_train, y_train), val, _", "implements a 'fit' method. This method calls either get_builder, or get_hyper_factory, depending on", "> 0: fitable.summary() fitable.save(self.exp_config[\"run_config\"][\"model_path\"]) run.add_artifact(path) def _new_model_path(self, name: str): model_path = Path(self.exp_config[\"run_config\"][\"model_path\"]).parent /", "compile_kwargs = dict( loss=run_config[\"loss\"], loss_weights=run_config[\"loss_weights\"], optimizer=run_config[\"optimizer\"], metrics=run_config[\"metrics\"], run_eagerly=run_config[\"run_eagerly\"], ) if run_config[\"use_strategy\"]: strategy =", "return loader, data def _load_fitable(self, loader: DataLoader, fitable_config: dict = None) -> Model:", "Model = None, fitable_config: dict = None, loader_config: dict = None, ): \"\"\"", "is the tuple (x_train, y_train). :param callbacks: Optional list. List of tensorflow.keras.Callback objects", "kwargs = dict( x=x_train, y=y_train, epochs=self.exp_config[\"run_config\"][\"epochs\"], batch_size=self.exp_config[\"run_config\"][\"batch_size\"], validation_data=val, class_weight=self.exp_config[\"run_config\"][\"class_weight\"], callbacks=callbacks, verbose=self.exp_config[\"run_config\"][\"fit_verbosity\"], ) fitable.fit(**kwargs)", "contains (x_test, y_test). :return: float. Scalar test_loss value. \"\"\" if test_data is None:", "Implemented is a default workflow for a basic keras/kerastuner type job. :param run:", "to obtain specific data_loader class. :return: Loader object and the data returned by", "def _save_fitable(self, run: Run, fitable: Model): \"\"\" :param run: sacred.Run object. see sacred", "self._save_fitable(run, fitable) return fitable def _load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]:", "return loss def _save_fitable(self, run: Run, fitable: Model): \"\"\" :param run: sacred.Run object.", "run: sacred.Run object. 
see sacred documentation for more details on utility. :param fitable:", "get_builder, or get_hyper_factory, depending on which type of fitable is beind loaded. :return:", "val, _ = data callbacks = callbacks or [] if self.exp_config[\"run_config\"][\"use_default_callbacks\"]: callbacks.extend( [", "dict = None) -> Model: \"\"\" Defines and compiles a fitable (keras.model or", "print(f\"Test split results: {loss}\") return loss def _save_fitable(self, run: Run, fitable: Model): \"\"\"", ":param fitable: tensorflow.keras.Model object. :param test_data: tuple. contains (x_test, y_test). :return: float. Scalar", "keras/kerastuner type job. :param run: sacred.Run object. See sacred documentation for more details", "fitable: Model = None, fitable_config: dict = None, loader_config: dict = None, ):", "tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard from tensorflow.keras.models import Model from .job import Job from", "\"\"\" :param run: sacred.Run object. See sacred documentation for details on utility. :param" ]
[ "TSNE import pandas as pd import matplotlib.pyplot as plt def visualize(data): data_embedded =", "import pandas as pd import matplotlib.pyplot as plt def visualize(data): data_embedded = TSNE(n_components=2).fit_transform(data)", "from sklearn.manifold import TSNE import pandas as pd import matplotlib.pyplot as plt def", "import TSNE import pandas as pd import matplotlib.pyplot as plt def visualize(data): data_embedded", "pandas as pd import matplotlib.pyplot as plt def visualize(data): data_embedded = TSNE(n_components=2).fit_transform(data) print(data_embedded)", "pd import matplotlib.pyplot as plt def visualize(data): data_embedded = TSNE(n_components=2).fit_transform(data) print(data_embedded) plt.plot(data_embedded) plt.show()", "as pd import matplotlib.pyplot as plt def visualize(data): data_embedded = TSNE(n_components=2).fit_transform(data) print(data_embedded) plt.plot(data_embedded)", "sklearn.manifold import TSNE import pandas as pd import matplotlib.pyplot as plt def visualize(data):" ]
[ "not None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in streams: if", "s.is_standalone(): continue if s.get_language() == target_lang: yield s for s in streams: if", "int = None): r\"\"\" Returns streams by priority. Streams which are not part", "target_lang: Target language :param forced_stream: :return: \"\"\" for s in streams: if s.is_standalone():", "continue if s.get_language() == target_lang: yield s for s in streams: if s.is_standalone():", "language :param forced_stream: :return: \"\"\" for s in streams: if s.is_standalone(): yield s", "by the remaining streams. :param streams: List of Stream objects :param target_lang: Target", "== target_lang: yield s for s in streams: if s.is_standalone(): continue if s.get_language", "match a specified language, finally followed by the remaining streams. :param streams: List", "import Stream import pycountry import logging def picker(streams: [Stream], target_lang: str = None,", "priority. Streams which are not part of a container are preferred first, followed", "forced_stream: :return: \"\"\" for s in streams: if s.is_standalone(): yield s if forced_stream", "r\"\"\" Returns streams by priority. Streams which are not part of a container", "followed by the remaining streams. :param streams: List of Stream objects :param target_lang:", "None: yield streams[forced_stream] if target_lang is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang =", "s for s in streams: if s.is_standalone(): continue if s.get_language == target_lang: continue", "s in streams: if s.is_standalone(): continue if s.get_language == target_lang: continue yield s", "specified language, finally followed by the remaining streams. :param streams: List of Stream", "[Stream], target_lang: str = None, forced_stream: int = None): r\"\"\" Returns streams by", "= None): r\"\"\" Returns streams by priority. 
Streams which are not part of", "are not part of a container are preferred first, followed by manually specified", "s.get_language() == target_lang: yield s for s in streams: if s.is_standalone(): continue if", "a specified language, finally followed by the remaining streams. :param streams: List of", "part of a container are preferred first, followed by manually specified stream indices,", "def picker(streams: [Stream], target_lang: str = None, forced_stream: int = None): r\"\"\" Returns", "are preferred first, followed by manually specified stream indices, then streams which match", "first, followed by manually specified stream indices, then streams which match a specified", "by priority. Streams which are not part of a container are preferred first,", "List of Stream objects :param target_lang: Target language :param forced_stream: :return: \"\"\" for", "Streams which are not part of a container are preferred first, followed by", "yield s if forced_stream is not None: yield streams[forced_stream] if target_lang is not", "not None: yield streams[forced_stream] if target_lang is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang", "target_lang is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in", "if s.is_standalone(): continue if s.get_language() == target_lang: yield s for s in streams:", "finally followed by the remaining streams. 
:param streams: List of Stream objects :param", "for s in streams: if s.is_standalone(): continue if s.get_language == target_lang: continue yield", "followed by manually specified stream indices, then streams which match a specified language,", "not part of a container are preferred first, followed by manually specified stream", ":return: \"\"\" for s in streams: if s.is_standalone(): yield s if forced_stream is", "target_lang: yield s for s in streams: if s.is_standalone(): continue if s.get_language ==", "streams[forced_stream] if target_lang is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for", "if forced_stream is not None: yield streams[forced_stream] if target_lang is not None: target_lang", "= target_lang.alpha_3 for s in streams: if s.is_standalone(): continue if s.get_language() == target_lang:", "= pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in streams: if s.is_standalone(): continue if", "s in streams: if s.is_standalone(): yield s if forced_stream is not None: yield", "import pycountry import logging def picker(streams: [Stream], target_lang: str = None, forced_stream: int", "streams which match a specified language, finally followed by the remaining streams. :param", "from subs2cia.sources import Stream import pycountry import logging def picker(streams: [Stream], target_lang: str", ":param forced_stream: :return: \"\"\" for s in streams: if s.is_standalone(): yield s if", "container are preferred first, followed by manually specified stream indices, then streams which", "= None, forced_stream: int = None): r\"\"\" Returns streams by priority. 
Streams which", "s if forced_stream is not None: yield streams[forced_stream] if target_lang is not None:", "pycountry import logging def picker(streams: [Stream], target_lang: str = None, forced_stream: int =", "import logging def picker(streams: [Stream], target_lang: str = None, forced_stream: int = None):", ":param streams: List of Stream objects :param target_lang: Target language :param forced_stream: :return:", "if s.get_language() == target_lang: yield s for s in streams: if s.is_standalone(): continue", "preferred first, followed by manually specified stream indices, then streams which match a", ":param target_lang: Target language :param forced_stream: :return: \"\"\" for s in streams: if", "yield streams[forced_stream] if target_lang is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3", "language, finally followed by the remaining streams. :param streams: List of Stream objects", "for s in streams: if s.is_standalone(): yield s if forced_stream is not None:", "objects :param target_lang: Target language :param forced_stream: :return: \"\"\" for s in streams:", "a container are preferred first, followed by manually specified stream indices, then streams", "pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in streams: if s.is_standalone(): continue if s.get_language()", "is not None: yield streams[forced_stream] if target_lang is not None: target_lang = pycountry.languages.lookup(target_lang)", "if s.is_standalone(): yield s if forced_stream is not None: yield streams[forced_stream] if target_lang", "streams by priority. Streams which are not part of a container are preferred", "str = None, forced_stream: int = None): r\"\"\" Returns streams by priority. 
Streams", "yield s for s in streams: if s.is_standalone(): continue if s.get_language == target_lang:", "then streams which match a specified language, finally followed by the remaining streams.", "picker(streams: [Stream], target_lang: str = None, forced_stream: int = None): r\"\"\" Returns streams", "if target_lang is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s", "s.is_standalone(): yield s if forced_stream is not None: yield streams[forced_stream] if target_lang is", "of a container are preferred first, followed by manually specified stream indices, then", "Stream objects :param target_lang: Target language :param forced_stream: :return: \"\"\" for s in", "in streams: if s.is_standalone(): yield s if forced_stream is not None: yield streams[forced_stream]", "Target language :param forced_stream: :return: \"\"\" for s in streams: if s.is_standalone(): yield", "is not None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in streams:", "forced_stream is not None: yield streams[forced_stream] if target_lang is not None: target_lang =", "target_lang: str = None, forced_stream: int = None): r\"\"\" Returns streams by priority.", "Stream import pycountry import logging def picker(streams: [Stream], target_lang: str = None, forced_stream:", "target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in streams: if s.is_standalone(): continue", "specified stream indices, then streams which match a specified language, finally followed by", "for s in streams: if s.is_standalone(): continue if s.get_language() == target_lang: yield s", "Returns streams by priority. 
Streams which are not part of a container are", "manually specified stream indices, then streams which match a specified language, finally followed", "subs2cia.sources import Stream import pycountry import logging def picker(streams: [Stream], target_lang: str =", "\"\"\" for s in streams: if s.is_standalone(): yield s if forced_stream is not", "the remaining streams. :param streams: List of Stream objects :param target_lang: Target language", "target_lang.alpha_3 for s in streams: if s.is_standalone(): continue if s.get_language() == target_lang: yield", "stream indices, then streams which match a specified language, finally followed by the", "remaining streams. :param streams: List of Stream objects :param target_lang: Target language :param", "streams: if s.is_standalone(): continue if s.get_language() == target_lang: yield s for s in", "streams: if s.is_standalone(): yield s if forced_stream is not None: yield streams[forced_stream] if", "by manually specified stream indices, then streams which match a specified language, finally", "of Stream objects :param target_lang: Target language :param forced_stream: :return: \"\"\" for s", "logging def picker(streams: [Stream], target_lang: str = None, forced_stream: int = None): r\"\"\"", "streams. :param streams: List of Stream objects :param target_lang: Target language :param forced_stream:", "in streams: if s.is_standalone(): continue if s.get_language() == target_lang: yield s for s", "indices, then streams which match a specified language, finally followed by the remaining", "target_lang = target_lang.alpha_3 for s in streams: if s.is_standalone(): continue if s.get_language() ==", "None): r\"\"\" Returns streams by priority. 
Streams which are not part of a", "s in streams: if s.is_standalone(): continue if s.get_language() == target_lang: yield s for", "None: target_lang = pycountry.languages.lookup(target_lang) target_lang = target_lang.alpha_3 for s in streams: if s.is_standalone():", "None, forced_stream: int = None): r\"\"\" Returns streams by priority. Streams which are", "streams: List of Stream objects :param target_lang: Target language :param forced_stream: :return: \"\"\"", "which are not part of a container are preferred first, followed by manually", "forced_stream: int = None): r\"\"\" Returns streams by priority. Streams which are not", "which match a specified language, finally followed by the remaining streams. :param streams:" ]
[]
[ "<reponame>movermeyer/pypkg # -*- coding: utf-8 -*- \"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print", "-*- \"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print message with surrounding ~'s.\"\"\" return", "# -*- coding: utf-8 -*- \"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print message", "utf-8 -*- \"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print message with surrounding ~'s.\"\"\"", "coding: utf-8 -*- \"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print message with surrounding", "-*- coding: utf-8 -*- \"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print message with", "\"\"\" Common utilities. \"\"\" def fancy(message): \"\"\"Print message with surrounding ~'s.\"\"\" return \"~{0}~\".format(message)" ]
[ "def compose_logger(name, log_file): logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT)", "= ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter = logging.Formatter(logconf.VERBOSE_FORMATTER) hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) return", "#!/usr/bin/python # -*- coding: utf-8 -*- import logging import os from cloghandler import", "cloghandler import ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger = logging.Logger(name) hdlr =", "hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter = logging.Formatter(logconf.VERBOSE_FORMATTER) hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG)", "ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter = logging.Formatter(logconf.VERBOSE_FORMATTER) hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) return logger", "logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter = logging.Formatter(logconf.VERBOSE_FORMATTER) hdlr.setFormatter(formatter) logger.addHandler(hdlr)", "compose_logger(name, log_file): logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter", "-*- coding: utf-8 -*- import logging import os from cloghandler import ConcurrentRotatingFileHandler import", "import os from cloghandler import 
ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger =", "log_file): logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter =", "= logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter = logging.Formatter(logconf.VERBOSE_FORMATTER) hdlr.setFormatter(formatter)", "from cloghandler import ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger = logging.Logger(name) hdlr", "logging import os from cloghandler import ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger", "os from cloghandler import ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger = logging.Logger(name)", "logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT) formatter = logging.Formatter(logconf.VERBOSE_FORMATTER)", "# -*- coding: utf-8 -*- import logging import os from cloghandler import ConcurrentRotatingFileHandler", "import ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler(", "ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file): logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR,", "utf-8 -*- import logging import os from cloghandler import ConcurrentRotatingFileHandler import logconf def", "import logging import os from cloghandler import ConcurrentRotatingFileHandler import logconf def compose_logger(name, log_file):", "import logconf def compose_logger(name, log_file): logger = logging.Logger(name) hdlr = 
ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file),", "logconf def compose_logger(name, log_file): logger = logging.Logger(name) hdlr = ConcurrentRotatingFileHandler( filename=os.path.join(LOG_FILE_DIR, log_file), maxBytes=logconf.MAX_BYTES,", "-*- import logging import os from cloghandler import ConcurrentRotatingFileHandler import logconf def compose_logger(name,", "coding: utf-8 -*- import logging import os from cloghandler import ConcurrentRotatingFileHandler import logconf" ]
[ "= nn.CrossEntropyLoss() def __call__(self, outputs, targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if", "outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) #", "outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager", "as nn from ..optimization import MetricManager from neurolab import params as P #", "from ..optimization import MetricManager from neurolab import params as P # Wrapper around", "# Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss()", "dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager for cross entropy", "for cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self): return", "self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES]", "__call__(self, outputs, targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets", "__init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self): return False def get_name(self):", "super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self): return False def get_name(self): return \"cross-entropy\"", "class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self): return", "Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, 
outputs,", "return self.crossent_loss(outputs, targets) # Criterion manager for cross entropy loss class CrossEntMetricManager(MetricManager): def", "loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self):", "def __init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self): return False def", "targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager for cross entropy loss class CrossEntMetricManager(MetricManager):", "criterion class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets): if", "def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets): if isinstance(outputs, dict): outputs", "targets) # Criterion manager for cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config):", "MetricManager from neurolab import params as P # Wrapper around Pytorch CrossEntropyLoss criterion", "P # Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss =", "__init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets): if isinstance(outputs, dict): outputs =", "def __call__(self, outputs, targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict):", "isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs,", "# Criterion manager for cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config)", "CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets): if 
isinstance(outputs, dict):", "Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def", "torch.nn as nn from ..optimization import MetricManager from neurolab import params as P", "import params as P # Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def", "<reponame>udday2014/HebbianLearning import torch.nn as nn from ..optimization import MetricManager from neurolab import params", "around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self,", "neurolab import params as P # Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric:", "nn from ..optimization import MetricManager from neurolab import params as P # Wrapper", "isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager for cross", "as P # Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss", "outputs, targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets =", "if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager for", "if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return", "params as P # Wrapper around Pytorch CrossEntropyLoss criterion class CrossEntMetric: def __init__(self):", "entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric() def", "manager for cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self):", "nn.CrossEntropyLoss() def __call__(self, outputs, 
targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets,", "CrossEntropyLoss criterion class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets):", "targets): if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS]", "cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric()", "config): super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self): return False def get_name(self): return", "import torch.nn as nn from ..optimization import MetricManager from neurolab import params as", "= outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion", "dict): outputs = outputs[P.KEY_CLASS_SCORES] if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets)", "= targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager for cross entropy loss class", "Criterion manager for cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def", "self.crossent_loss(outputs, targets) # Criterion manager for cross entropy loss class CrossEntMetricManager(MetricManager): def __init__(self,", "import MetricManager from neurolab import params as P # Wrapper around Pytorch CrossEntropyLoss", "from neurolab import params as P # Wrapper around Pytorch CrossEntropyLoss criterion class", "targets = targets[P.KEY_LABEL_TARGETS] return self.crossent_loss(outputs, targets) # Criterion manager for cross entropy loss", "..optimization import MetricManager from neurolab import params as P # Wrapper around Pytorch", 
"CrossEntMetricManager(MetricManager): def __init__(self, config): super().__init__(config) def get_metric(self): return CrossEntMetric() def higher_is_better(self): return False", "class CrossEntMetric: def __init__(self): self.crossent_loss = nn.CrossEntropyLoss() def __call__(self, outputs, targets): if isinstance(outputs," ]
[ "def get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '')", "data = pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"])", "embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]]", "models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data import DataLoader", "'celebs.index': index_type = 'added_.index' with open('indexes/' + index_type, 'rb') as index_file: kdtree =", "embedding.replace('[', '') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') return", "get names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index')", "dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset,", "x, y in loader: embedding = embder.embed_one(x) if embedding is not None: embedding", "+ dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1]", "embedding = np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ ==", "pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file)", "import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data import DataLoader from", "with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, 
idx_to_class_file) with open('indexes/'", "np from scipy.spatial import distance import pickle from models.mtcnn import MTCNN from models.inception_resnet_v1", "downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in", "names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') #", "action='store_const', # const=sum, default=max, # help='sum the integers (default: find the max)') args", "o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type):", "methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']',", "ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA = config.getboolean('main',", "idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with", "+ ' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding =", "embedding is not None: embedding = embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with", "closest = y if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown", "random, time, sys from configparser import ConfigParser import boto3 from loguru import logger", "+ index_type, 'rb') as index_file: kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] +", "CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 
BUCKET_NAME =", "kdtree, idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding')", "= config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE =", "for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1])", "idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree,", "+ index_type.split('.')[0] + '.data', 'rb') as data_file: data = pickle.load(data_file) logger.info(idx_to_class) return kdtree,", "import datasets import numpy as np import pandas as pd import os import", "= Embedder() R = [] for x, y in loader: embedding = embder.embed_one(x)", "face\") return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist))", "config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main',", "idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding", "config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME", "matplotlib.pyplot as plt import numpy as np from scipy.spatial import distance import pickle", "REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001))", "with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: 
pickle.dump(R, data_file) def build_indexes():", "o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index': index_type", "if index_type == 'celebs.index': index_type = 'added_.index' with open('indexes/' + index_type, 'rb') as", "= datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c", "'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>')", "from torchvision import datasets import numpy as np import pandas as pd import", "import pandas as pd import os import pickle from embeddings import Embedder from", "R = [] for x, y in loader: embedding = embder.embed_one(x) if embedding", "\"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist)) return idx_to_class[closest]", "= Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file)", "# parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the integers (default: find", "= DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R = [] for x, y", "default=max, # help='sum the integers (default: find the max)') args = parser.parse_args() PORT", "INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource =", "'.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb')", "idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\": parser = 
argparse.ArgumentParser(description='Index which can be", "return kdtree, idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding =", "== 'celebs.index': index_type = 'added_.index' with open('indexes/' + index_type, 'rb') as index_file: kdtree", "idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file:", "from models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data import DataLoader from torchvision import", "embedding.replace('[', '') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') closest", "[os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub:", "in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset =", "downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))]", "get_index(index_type): if index_type == 'celebs.index': index_type = 'added_.index' with open('indexes/' + index_type, 'rb')", "to get names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of", "pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree, index_file)", "ConfigParser import boto3 from loguru import logger from tqdm import tqdm_notebook import matplotlib.pyplot", "torch from 
torch.utils.data import DataLoader from torchvision import datasets import numpy as np", "with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with", "INDEX_TYPE = args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not", "DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R = [] for x, y in", "as pd import os import pickle from embeddings import Embedder from index import", "import boto3 from loguru import logger from tqdm import tqdm_notebook import matplotlib.pyplot as", "parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the integers (default: find the", "') closest = 0 dist = np.inf for emb, y in data: cur", "CORS from waitress import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold')", "os import pickle from embeddings import Embedder from index import Node from flask", "if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type ==", "serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method')", "import argparse import random, time, sys from configparser import ConfigParser import boto3 from", "dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] +", "'.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data',", "x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for", 
"import os import pickle from embeddings import Embedder from index import Node from", "< dist: dist = cur closest = y if closest > 1 +", "METHOD = config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE", "idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app, host='0.0.0.0', port=PORT)", "s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder):", "'face_threshold') METHOD = config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug')", "R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as", "str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[',", "= args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE)", "import torch from torch.utils.data import DataLoader from torchvision import datasets import numpy as", "dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\": parser =", "import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main',", "import DataLoader from torchvision import datasets import numpy as np import pandas as", "Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"])", "pandas as pd import os import pickle from embeddings import 
Embedder from index", "'rb') as index_file: kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb')", "bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder)", "= config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' #", "index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: pickle.dump(R, data_file) def", "# DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE =", "as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for", "config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512", "import ConfigParser import boto3 from loguru import logger from tqdm import tqdm_notebook import", "args = parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data =", "- embedding) if cur < dist: dist = cur closest = y if", "jsonify, Response from flask_cors import CORS from waitress import serve config = ConfigParser()", "argparse.ArgumentParser(description='Index which can be used to get names.') parser.add_argument('--port', type=int, default=5000, help='port number')", "os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index':", "= 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT =", "embedding = 
np.fromstring(embedding, dtype=float, sep=', ') closest = 0 dist = np.inf for", "embeddings import Embedder from index import Node from flask import Flask, request, jsonify,", "'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as", "args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app,", "with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/'", "as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as", "to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def", "the max)') args = parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type kdtree, idx_to_class,", "time, sys from configparser import ConfigParser import boto3 from loguru import logger from", "') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index which", "5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName):", "'') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding,", "Response from flask_cors import CORS from waitress import serve config = ConfigParser() config.read('config.ini')", "return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '')", "find the max)') args = parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type kdtree,", "import numpy as np from 
scipy.spatial import distance import pickle from models.mtcnn import", "if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i:", "FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' '", "# print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app, host='0.0.0.0', port=PORT) else: app.run(debug=True, host='0.0.0.0', port=PORT)", "DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\",", "from embeddings import Embedder from index import Node from flask import Flask, request,", "REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for", "import InceptionResnetV1 import torch from torch.utils.data import DataLoader from torchvision import datasets import", "MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data import DataLoader from torchvision", "import numpy as np import pandas as pd import os import pickle from", "logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class =", "@app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding =", "data_file: data = pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\",", "'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV =", "return x[0] def 
downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName)", "c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}", "default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum', dest='accumulate', action='store_const',", "DataLoader from torchvision import datasets import numpy as np import pandas as pd", "= y if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face", "flask import Flask, request, jsonify, Response from flask_cors import CORS from waitress import", "for x, y in loader: embedding = embder.embed_one(x) if embedding is not None:", "== \"__main__\": parser = argparse.ArgumentParser(description='Index which can be used to get names.') parser.add_argument('--port',", "int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName,", "type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, #", "index_type == 'celebs.index': index_type = 'added_.index' with open('indexes/' + index_type, 'rb') as index_file:", "open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file: data = pickle.load(data_file) logger.info(idx_to_class) return", "i in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R =", "dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the integers (default: find the max)')", "datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for", "index') # 
parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the integers (default:", "y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file:", "sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder", "aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key,", "pickle from embeddings import Embedder from index import Node from flask import Flask,", "FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV", "os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type", "<filename>index_builder.py import argparse import random, time, sys from configparser import ConfigParser import boto3", "'') embedding = np.fromstring(embedding, dtype=float, sep=', ') closest = 0 dist = np.inf", "loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R = [] for x,", "from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as np from", "object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset", "def get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '')", "cur 
closest = y if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return", "PORT = args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT,", "import pickle from embeddings import Embedder from index import Node from flask import", "c for c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder =", "dtype=float, sep=', ') closest = 0 dist = np.inf for emb, y in", "help='sum the integers (default: find the max)') args = parser.parse_args() PORT = args.port", "data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME)", "methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']',", "= config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA = config.getboolean('main', 'cuda') DEBUG_ENV =", "index_type = 'added_.index' with open('indexes/' + index_type, 'rb') as index_file: kdtree = pickle.load(index_file)", "Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with", "os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>',", "parser = argparse.ArgumentParser(description='Index which can be used to get names.') parser.add_argument('--port', type=int, default=5000,", "= bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index')", "data: cur = np.linalg.norm(emb - embedding) if cur < dist: dist = cur", "def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, 
REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if", "emb, y in data: cur = np.linalg.norm(emb - embedding) if cur < dist:", "pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: pickle.dump(R, data_file)", "+ FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + '", "= s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def", "embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') closest = 0", "help='type of index') # parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the", "as index_file: kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as", "object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in", "dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i:", "= parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE)", "index_type, 'rb') as index_file: kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class',", "import matplotlib.pyplot as plt import numpy as np from scipy.spatial import distance import", "for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c, i in", "number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum', 
dest='accumulate', action='store_const', # const=sum,", "not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c", "'.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb')", "collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket =", "from torch.utils.data import DataLoader from torchvision import datasets import numpy as np import", "1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] +", "config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV", "as index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file:", "from scipy.spatial import distance import pickle from models.mtcnn import MTCNN from models.inception_resnet_v1 import", "None: embedding = embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1]", "[] for x, y in loader: embedding = embder.embed_one(x) if embedding is not", "import tqdm_notebook import matplotlib.pyplot as plt import numpy as np from scipy.spatial import", "config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA = config.getboolean('main', 'cuda')", "o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder)", "embedding = 
embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] +", "can be used to get names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str,", "= pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file: data =", "which can be used to get names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type',", "# help='sum the integers (default: find the max)') args = parser.parse_args() PORT =", "in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()} loader =", "= Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '')", "as np from scipy.spatial import distance import pickle from models.mtcnn import MTCNN from", "= get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app, host='0.0.0.0', port=PORT) else: app.run(debug=True,", "datasets import numpy as np import pandas as pd import os import pickle", "= np.fromstring(embedding, dtype=float, sep=', ') closest = 0 dist = np.inf for emb,", "with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file: data = pickle.load(data_file) logger.info(idx_to_class)", "request, jsonify, Response from flask_cors import CORS from waitress import serve config =", "idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file: data", "sys from configparser import ConfigParser import boto3 from loguru import logger from tqdm", "tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as np from scipy.spatial", "pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data app = Flask(__name__) 
@app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force():", "'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE", "flask_cors import CORS from waitress import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD =", "import logger from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as", "config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA", "= 0 dist = np.inf for emb, y in data: cur = np.linalg.norm(emb", "get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '') embedding", "torchvision import datasets import numpy as np import pandas as pd import os", "import Embedder from index import Node from flask import Flask, request, jsonify, Response", "from models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data import", "get_name(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '') embedding", "parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum',", "embder = Embedder() R = [] for x, y in loader: embedding =", "= [] for x, y in loader: embedding = embder.embed_one(x) if embedding is", "from flask import Flask, request, jsonify, Response from flask_cors import CORS from waitress", "embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb')", "return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index which can", "'method') 
CUDA = config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME", "# PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return", "= cur closest = y if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\")", "embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if", "import distance import pickle from models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import", "def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object", "dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn,", "the integers (default: find the max)') args = parser.parse_args() PORT = args.port INDEX_TYPE", "CORS(app) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index which can be used to", "@app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding =", "import pickle from models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch from", "Embedder from index import Node from flask import Flask, request, jsonify, Response from", "os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c,", "'.data', 'rb') as data_file: data = pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data app", "= embder.embed_one(x) if embedding is not None: embedding = embedding[0] 
R.append((embedding, y)) kdtree", "# const=sum, default=max, # help='sum the integers (default: find the max)') args =", "be used to get names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index',", "= boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not", "dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub", "'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME =", "= 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001)) #", "pickle from models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data", "logger from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as np", "dist: dist = cur closest = y if closest > 1 + FACE_THRESHOLD:", "const=sum, default=max, # help='sum the integers (default: find the max)') args = parser.parse_args()", "open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/'", "embder.embed_one(x) if embedding is not None: embedding = embedding[0] R.append((embedding, y)) kdtree =", "idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding", "= np.linalg.norm(emb - embedding) if cur < dist: dist = cur closest =", "boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if 
not os.path.exists(os.path.dirname(object.key)):", "512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) #", "max)') args = parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data", "distance import pickle from models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1 import torch", "= config.getboolean('main', 'cuda') DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project'", "'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\",", "+ '.data', 'wb') as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub =", "logger.info(idx_to_class) return kdtree, idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding", "= embedding.replace('[', '') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ')", "dist = cur closest = y if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown", "index_file: kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file:", "of index') # parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum the integers", "'wb') as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o)", "from configparser import ConfigParser import boto3 from loguru import logger from tqdm import", "'.data', 'wb') as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME,", "cur = np.linalg.norm(emb - 
embedding) if cur < dist: dist = cur closest", "{i: c for c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder", "Flask, request, jsonify, Response from flask_cors import CORS from waitress import serve config", "bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key)", "numpy as np from scipy.spatial import distance import pickle from models.mtcnn import MTCNN", "parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) #", "+ dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1]", "help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum', dest='accumulate', action='store_const', #", "from flask_cors import CORS from waitress import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD", "for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index': index_type =", "= os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3',", "in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index': index_type = 'added_.index' with", "embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') closest = 0 dist =", "0 dist = np.inf for emb, y in data: cur = np.linalg.norm(emb -", "configparser import ConfigParser import boto3 from loguru import logger from tqdm import tqdm_notebook", "'rb') as data_file: data = 
pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data app =", "cur < dist: dist = cur closest = y if closest > 1", "= embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app)", "parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max,", "y in data: cur = np.linalg.norm(emb - embedding) if cur < dist: dist", "' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding", "dist = np.inf for emb, y in data: cur = np.linalg.norm(emb - embedding)", "build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME,", "models.inception_resnet_v1 import InceptionResnetV1 import torch from torch.utils.data import DataLoader from torchvision import datasets", "if cur < dist: dist = cur closest = y if closest >", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index which can be used to get", "EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\",", "build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index': index_type = 'added_.index' with open('indexes/' +", "with open('indexes/' + index_type, 'rb') as index_file: kdtree = pickle.load(index_file) with open('indexes/' +", "'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as", "data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = 
[os.path.join(REMOTE_DIRECTORY_NAME, o) for o", "(default: find the max)') args = parser.parse_args() PORT = args.port INDEX_TYPE = args.index_type", "pd import os import pickle from embeddings import Embedder from index import Node", "in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if", "+ '.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data',", "np.inf for emb, y in data: cur = np.linalg.norm(emb - embedding) if cur", "embedding) if cur < dist: dist = cur closest = y if closest", "in data: cur = np.linalg.norm(emb - embedding) if cur < dist: dist =", "num_workers=4) embder = Embedder() R = [] for x, y in loader: embedding", "embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '') embedding =", "s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if", "used to get names.') parser.add_argument('--port', type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type", "= [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME) if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in", "data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app, host='0.0.0.0', port=PORT) else:", "open('indexes/' + index_type, 'rb') as index_file: kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0]", "= 'added_.index' with open('indexes/' + index_type, 'rb') as index_file: kdtree = pickle.load(index_file) with", "= np.inf for emb, y in data: cur = np.linalg.norm(emb - embedding) if", "face Similar to 
idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\",", "= embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') closest = 0 dist", "index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0]", "DEBUG_ENV = config.getboolean('main', 'debug') EMBEDDING_SIZE = 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data'", "dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] +", "scipy.spatial import distance import pickle from models.mtcnn import MTCNN from models.inception_resnet_v1 import InceptionResnetV1", "= request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding,", "Embedder() R = [] for x, y in loader: embedding = embder.embed_one(x) if", "pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file: data = pickle.load(data_file)", "remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName):", "np import pandas as pd import os import pickle from embeddings import Embedder", "= argparse.ArgumentParser(description='Index which can be used to get names.') parser.add_argument('--port', type=int, default=5000, help='port", "PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0]", "closest = 0 dist = np.inf for emb, y in data: cur =", "torch.utils.data import DataLoader from torchvision import datasets import numpy as np import pandas", 
"np.fromstring(embedding, dtype=float, sep=', ') closest = 0 dist = np.inf for emb, y", "as data_file: data = pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data app = Flask(__name__)", "args.port INDEX_TYPE = args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if", "+ '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] +", "= ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD = config.get('main', 'method') CUDA =", "+ '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index',", "idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name():", "loguru import logger from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy", "index_file: pickle.dump(kdtree, index_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: pickle.dump(R,", "= np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\":", "open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' +", "app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[',", "get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app, host='0.0.0.0', port=PORT) else: app.run(debug=True, host='0.0.0.0',", "def get_index(index_type): if index_type == 'celebs.index': index_type = 'added_.index' with open('indexes/' + index_type,", "from 
waitress import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD", "return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' + str(dist)) return", "y in loader: embedding = embder.embed_one(x) if embedding is not None: embedding =", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index which can be used to get names.')", "kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file: pickle.dump(dataset.idx_to_class,", "index_type.split('.')[0] + '.data', 'rb') as data_file: data = pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class,", "bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i", "' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding')", "y if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar", "open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file: pickle.dump(kdtree, index_file) with open('indexes/' +", "+ str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def get_name(): embedding = request.args.get('embedding') embedding =", "= pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class =", "aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket = s3_resource.Bucket(bucketName) for object in bucket.objects.filter(Prefix=remoteDirectoryName): if not os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key))", "for o in os.listdir(REMOTE_DIRECTORY_NAME) if 
os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))] for dataset_folder in sub: build_kd_tree(dataset_folder) def", "+ index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' +", "Node from flask import Flask, request, jsonify, Response from flask_cors import CORS from", "np.linalg.norm(emb - embedding) if cur < dist: dist = cur closest = y", "type=int, default=5000, help='port number') parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index') # parser.add_argument('--sum', dest='accumulate',", "+ dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME)", "request.args.get('embedding') embedding = embedding.replace('[', '') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float,", "tqdm_notebook import matplotlib.pyplot as plt import numpy as np from scipy.spatial import distance", "as plt import numpy as np from scipy.spatial import distance import pickle from", "is not None: embedding = embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/'", "import random, time, sys from configparser import ConfigParser import boto3 from loguru import", "'rb') as idx_to_class_file: idx_to_class = pickle.load(idx_to_class_file) with open('indexes/' + index_type.split('.')[0] + '.data', 'rb')", "False)) # PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x):", "if embedding is not None: embedding = embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R)", "plt import numpy as np from scipy.spatial import distance import pickle from models.mtcnn", "logger.info(idx_to_class[closest] + ' ' + str(dist)) return idx_to_class[closest] @app.route(\"/who_tree\", methods=[\"GET\"]) def 
get_name(): embedding", "from loguru import logger from tqdm import tqdm_notebook import matplotlib.pyplot as plt import", "for emb, y in data: cur = np.linalg.norm(emb - embedding) if cur <", "os.path.exists(os.path.dirname(object.key)): os.makedirs(os.path.dirname(object.key)) bucket.download_file(object.key, object.key) def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for", "def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>') bucket", "in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R = []", "from index import Node from flask import Flask, request, jsonify, Response from flask_cors", "def build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in dataset.class_to_idx.items()})", "c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c, i", "as np import pandas as pd import os import pickle from embeddings import", "dataset_folder in sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index': index_type = 'added_.index'", "bool(os.getenv(\"DEBUG_ENV\", False)) # PORT = int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def", "import CORS from waitress import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main',", "= embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class',", "closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar to idx_to_class[closest]\"", "argparse import random, time, sys from 
configparser import ConfigParser import boto3 from loguru", "open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file: pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME,", "= 512 BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False))", "pickle.dump(R, data_file) def build_indexes(): downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME) sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in", "i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()} loader", "= args.index_type kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV:", "import Node from flask import Flask, request, jsonify, Response from flask_cors import CORS", "logger.info(\"Unknown face\") return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest] + ' ' +", "# INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def downloadDirectoryFroms3(bucketName, remoteDirectoryName): s3_resource", "boto3 from loguru import logger from tqdm import tqdm_notebook import matplotlib.pyplot as plt", "= int(os.getenv(\"PORT\", 5001)) # INDEX_TYPE = os.getenv(\"INDEX_TYPE\", 'celebs.index') def collate_fn(x): return x[0] def", "data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding =", "not None: embedding = embedding[0] R.append((embedding, y)) kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R) with open('indexes/' +", "InceptionResnetV1 import torch from torch.utils.data import DataLoader from torchvision import datasets import numpy", "waitress import serve config = ConfigParser() config.read('config.ini') FACE_THRESHOLD = config.getfloat('main', 'face_threshold') METHOD =", "for c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset, 
collate_fn=collate_fn, num_workers=4) embder = Embedder()", "'') embedding = np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__", "'') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=', ') closest =", "BUCKET_NAME = 'info-ret-final-project' REMOTE_DIRECTORY_NAME = 'data' # DEBUG_ENV = bool(os.getenv(\"DEBUG_ENV\", False)) # PORT", "dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R = [] for", "logger.info({i: c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class = {i: c for c,", "index import Node from flask import Flask, request, jsonify, Response from flask_cors import", "numpy as np import pandas as pd import os import pickle from embeddings", "embedding = embder.embed_one(x) if embedding is not None: embedding = embedding[0] R.append((embedding, y))", "build_kd_tree(dataset_folder): logger.info(dataset_folder.split('/')[-1]) dataset = datasets.ImageFolder(dataset_folder) logger.info({i: c for c, i in dataset.class_to_idx.items()}) dataset.idx_to_class", "= {i: c for c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4)", "kdtree = pickle.load(index_file) with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file: idx_to_class", "import Flask, request, jsonify, Response from flask_cors import CORS from waitress import serve", "if closest > 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar to", "\"__main__\": parser = argparse.ArgumentParser(description='Index which can be used to get names.') parser.add_argument('--port', type=int,", "sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index", "= pickle.load(data_file) logger.info(idx_to_class) return 
kdtree, idx_to_class, data app = Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def", "as idx_to_class_file: pickle.dump(dataset.idx_to_class, idx_to_class_file) with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file:", "sub: build_kd_tree(dataset_folder) def get_index(index_type): if index_type == 'celebs.index': index_type = 'added_.index' with open('indexes/'", "np.fromstring(embedding, dtype=float, sep=', ') return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]] CORS(app) if __name__ == \"__main__\": parser", "kdtree, idx_to_class, data = get_index(INDEX_TYPE) # print(PORT, INDEX_TYPE) if not DEBUG_ENV: serve(app, host='0.0.0.0',", "1)[0][1]] CORS(app) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='Index which can be used", "loader: embedding = embder.embed_one(x) if embedding is not None: embedding = embedding[0] R.append((embedding,", "'added_.index' with open('indexes/' + index_type, 'rb') as index_file: kdtree = pickle.load(index_file) with open('indexes/'", "in loader: embedding = embder.embed_one(x) if embedding is not None: embedding = embedding[0]", "embedding = embedding.replace('[', '') embedding = embedding.replace(']', '') embedding = np.fromstring(embedding, dtype=float, sep=',", "+ '.data', 'rb') as data_file: data = pickle.load(data_file) logger.info(idx_to_class) return kdtree, idx_to_class, data", "collate_fn=collate_fn, num_workers=4) embder = Embedder() R = [] for x, y in loader:", "sep=', ') closest = 0 dist = np.inf for emb, y in data:", "default='celebs.index', help='type of index') # parser.add_argument('--sum', dest='accumulate', action='store_const', # const=sum, default=max, # help='sum", "c, i in dataset.class_to_idx.items()} loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4) embder = Embedder() R", "> 1 + FACE_THRESHOLD: logger.info(\"Unknown face\") return \"Unknown face Similar to idx_to_class[closest]\" logger.info(idx_to_class[closest]", 
"integers (default: find the max)') args = parser.parse_args() PORT = args.port INDEX_TYPE =", "Flask(__name__) @app.route(\"/who_brute\", methods=[\"GET\"]) def get_brute_force(): embedding = request.args.get('embedding') embedding = embedding.replace('[', '') embedding" ]
[ "from django.contrib import admin from .models import Concert # Register your models here.", "import Concert # Register your models here. class ConcertAdmin(admin.ModelAdmin): readonly_fields = ('created', 'updated')", "from .models import Concert # Register your models here. class ConcertAdmin(admin.ModelAdmin): readonly_fields =", "# Register your models here. class ConcertAdmin(admin.ModelAdmin): readonly_fields = ('created', 'updated') admin.site.register(Concert, ConcertAdmin)", "Concert # Register your models here. class ConcertAdmin(admin.ModelAdmin): readonly_fields = ('created', 'updated') admin.site.register(Concert,", "django.contrib import admin from .models import Concert # Register your models here. class", ".models import Concert # Register your models here. class ConcertAdmin(admin.ModelAdmin): readonly_fields = ('created',", "import admin from .models import Concert # Register your models here. class ConcertAdmin(admin.ModelAdmin):", "admin from .models import Concert # Register your models here. class ConcertAdmin(admin.ModelAdmin): readonly_fields" ]
[ "between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5)", "as np import math from operator import itemgetter import sys #This program is", "numNeighbors: classCount = np.zeros(numClasses) for i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] =", "= len(Xtrain) numNeighbors = 5 numClasses = 3 #Classes have to be labeled", "sense of how the the algorithm chooses the closest #neighbors to a particular", "return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points on", "It's not particularly #optimized in any way but it does give a sense", "be labeled starting from 0...numCLasses - 1 Xtest = [2,1.2] minDistance = sys.maxint", "Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for", "#optimized in any way but it does give a sense of how the", "for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation,", "key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for i in range(0,numNeighbors): temp", "classCount[temp[2]] = classCount[temp[2]] + 1 maxCount = 0 index = 0 for i", "program is just a rough reimplementation of k nearest neighbors in Python. It's", "how the the algorithm chooses the closest #neighbors to a particular test vector,", "and then how the output class is determined. #Distance between 2 points def", "to be labeled starting from 0...numCLasses - 1 Xtest = [2,1.2] minDistance =", "i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount =", "a rough reimplementation of k nearest neighbors in Python. 
It's not particularly #optimized", "print classCount print index else: print 'Number of points less than number of", "have to be labeled starting from 0...numCLasses - 1 Xtest = [2,1.2] minDistance", "test vector, and then how the output class is determined. #Distance between 2", "x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1))", "- point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points on an 4x4", "index = 0 for i in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount =", "numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses = 3 #Classes have to be", "__future__ import division import numpy as np import math from operator import itemgetter", "distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses)", "import math from operator import itemgetter import sys #This program is just a", "distanceAndLocation = [] for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]])", "math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points on an", "1 maxCount = 0 index = 0 for i in range(0,len(classCount)): if (classCount[i]", "= 0 for i in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount = classCount[i]", "it does give a sense of how the the algorithm chooses the closest", "for i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount", "#Classes have to be labeled starting from 0...numCLasses - 1 Xtest = [2,1.2]", "classCount = np.zeros(numClasses) for i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]]", 
"points on an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters", "is just a rough reimplementation of k nearest neighbors in Python. It's not", "len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for i in range(0,numNeighbors): temp = distanceAndLocation[i]", "in Python. It's not particularly #optimized in any way but it does give", "np import math from operator import itemgetter import sys #This program is just", "classCount print index else: print 'Number of points less than number of neighbors'", "= sys.maxint distanceAndLocation = [] for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x],", "a sense of how the the algorithm chooses the closest #neighbors to a", "distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for i", "algorithm chooses the closest #neighbors to a particular test vector, and then how", "#Visualize these data points on an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain", "sys.maxint distanceAndLocation = [] for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance,", "any way but it does give a sense of how the the algorithm", "from 0...numCLasses - 1 Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation = []", "closest #neighbors to a particular test vector, and then how the output class", "range(0,len(classCount)): if (classCount[i] > maxCount): maxCount = classCount[i] index = i print classCount", "distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount = 0 index = 0 for", "chooses the closest #neighbors to a particular test vector, and then how the", "class is determined. 
#Distance between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2)", "in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount = 0", "particular test vector, and then how the output class is determined. #Distance between", "- point2[1],2)),0.5) #Visualize these data points on an 4x4 xy graph Xtrain =", "import sys #This program is just a rough reimplementation of k nearest neighbors", "0 index = 0 for i in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount", "0...numCLasses - 1 Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation = [] for", "range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount = 0 index", "2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize", "temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount = 0 index =", "for i in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount = classCount[i] index =", "#neighbors to a particular test vector, and then how the output class is", "numNeighbors = 5 numClasses = 3 #Classes have to be labeled starting from", "classCount[i] index = i print classCount print index else: print 'Number of points", "= 5 numClasses = 3 #Classes have to be labeled starting from 0...numCLasses", "neighbors in Python. It's not particularly #optimized in any way but it does", "of k nearest neighbors in Python. 
It's not particularly #optimized in any way", "operator import itemgetter import sys #This program is just a rough reimplementation of", "#Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses = 3 #Classes have to", "distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >=", "#Distance between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] -", "[] for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation =", "is determined. #Distance between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) +", "- 1 Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation = [] for x", "= 3 #Classes have to be labeled starting from 0...numCLasses - 1 Xtest", "minDistance = sys.maxint distanceAndLocation = [] for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest)", "numClasses = 3 #Classes have to be labeled starting from 0...numCLasses - 1", "import division import numpy as np import math from operator import itemgetter import", "starting from 0...numCLasses - 1 Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation =", "np.zeros(numClasses) for i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1", "reimplementation of k nearest neighbors in Python. It's not particularly #optimized in any", "point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points on an 4x4 xy", "maxCount = 0 index = 0 for i in range(0,len(classCount)): if (classCount[i] >", "rough reimplementation of k nearest neighbors in Python. 
It's not particularly #optimized in", "a particular test vector, and then how the output class is determined. #Distance", "these data points on an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain =", ">= numNeighbors: classCount = np.zeros(numClasses) for i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]]", "import numpy as np import math from operator import itemgetter import sys #This", "how the output class is determined. #Distance between 2 points def distanceBetween(point1,point2): return", "does give a sense of how the the algorithm chooses the closest #neighbors", "k nearest neighbors in Python. It's not particularly #optimized in any way but", "data points on an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1]", "len(Xtrain) numNeighbors = 5 numClasses = 3 #Classes have to be labeled starting", "the algorithm chooses the closest #neighbors to a particular test vector, and then", "maxCount = classCount[i] index = i print classCount print index else: print 'Number", "if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for i in range(0,numNeighbors): temp =", "of how the the algorithm chooses the closest #neighbors to a particular test", "points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these", "+ math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points on an 4x4 xy graph", "math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points on an 4x4 xy graph Xtrain", "= classCount[i] index = i print classCount print index else: print 'Number of", "distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data points", "> maxCount): maxCount = classCount[i] index = i print classCount print index else:", "from operator 
import itemgetter import sys #This program is just a rough reimplementation", "output class is determined. #Distance between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] -", "particularly #optimized in any way but it does give a sense of how", "= distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] + 1 maxCount = 0 index = 0", "not particularly #optimized in any way but it does give a sense of", "= [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses = 3 #Classes", "i print classCount print index else: print 'Number of points less than number", "= [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses", "[2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses = 3 #Classes have", "def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5) #Visualize these data", "Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses = 3", "determined. #Distance between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1]", "on an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples", "1 Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation = [] for x in", "index = i print classCount print index else: print 'Number of points less", "= 0 index = 0 for i in range(0,len(classCount)): if (classCount[i] > maxCount):", "Python. It's not particularly #optimized in any way but it does give a", "the output class is determined. 
#Distance between 2 points def distanceBetween(point1,point2): return math.pow((math.pow(point1[0]", "sys #This program is just a rough reimplementation of k nearest neighbors in", "the closest #neighbors to a particular test vector, and then how the output", "range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation)", "xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors", "0 for i in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount = classCount[i] index", "<reponame>nightheronry/Basic-ML-Algorithm-Reimplementations from __future__ import division import numpy as np import math from operator", "Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation = [] for x in range(0,numTrainExamples):", "an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples =", "point2[1],2)),0.5) #Visualize these data points on an 4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]]", "division import numpy as np import math from operator import itemgetter import sys", "[[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5 numClasses =", "= np.zeros(numClasses) for i in range(0,numNeighbors): temp = distanceAndLocation[i] classCount[temp[2]] = classCount[temp[2]] +", "import itemgetter import sys #This program is just a rough reimplementation of k", "labeled starting from 0...numCLasses - 1 Xtest = [2,1.2] minDistance = sys.maxint distanceAndLocation", "+ 1 maxCount = 0 index = 0 for i in range(0,len(classCount)): if", "5 numClasses = 3 #Classes have to be labeled starting from 
0...numCLasses -", "= sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for i in", "= classCount[temp[2]] + 1 maxCount = 0 index = 0 for i in", "give a sense of how the the algorithm chooses the closest #neighbors to", "vector, and then how the output class is determined. #Distance between 2 points", "3 #Classes have to be labeled starting from 0...numCLasses - 1 Xtest =", "= [2,1.2] minDistance = sys.maxint distanceAndLocation = [] for x in range(0,numTrainExamples): distance", "in any way but it does give a sense of how the the", "numpy as np import math from operator import itemgetter import sys #This program", "Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors = 5", "[2,1.2] minDistance = sys.maxint distanceAndLocation = [] for x in range(0,numTrainExamples): distance =", "way but it does give a sense of how the the algorithm chooses", "#This program is just a rough reimplementation of k nearest neighbors in Python.", "distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount", "graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain) numNeighbors =", "nearest neighbors in Python. It's not particularly #optimized in any way but it", "math from operator import itemgetter import sys #This program is just a rough", "classCount[temp[2]] + 1 maxCount = 0 index = 0 for i in range(0,len(classCount)):", "= [] for x in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation", "then how the output class is determined. 
#Distance between 2 points def distanceBetween(point1,point2):", "the the algorithm chooses the closest #neighbors to a particular test vector, and", "= i print classCount print index else: print 'Number of points less than", "sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount = np.zeros(numClasses) for i in range(0,numNeighbors):", "in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount = classCount[i] index = i print", "itemgetter import sys #This program is just a rough reimplementation of k nearest", "in range(0,numTrainExamples): distance = distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if", "i in range(0,len(classCount)): if (classCount[i] > maxCount): maxCount = classCount[i] index = i", "distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors: classCount =", "(classCount[i] > maxCount): maxCount = classCount[i] index = i print classCount print index", "= distanceBetween(Xtrain[x],Xtest) distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]]) distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1)) if len(distanceAndLocation) >= numNeighbors:", "but it does give a sense of how the the algorithm chooses the", "just a rough reimplementation of k nearest neighbors in Python. 
It's not particularly", "if (classCount[i] > maxCount): maxCount = classCount[i] index = i print classCount print", "maxCount): maxCount = classCount[i] index = i print classCount print index else: print", "4x4 xy graph Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]] Ytrain = [2,1,2,1,1,0,0,0,1] #Hyperparamters numTrainExamples = len(Xtrain)", "from __future__ import division import numpy as np import math from operator import", "to a particular test vector, and then how the output class is determined." ]
[ "that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt exists so", "layout from dasherl erlang side.\"\"\" # simply call to erlang module and ask", "side.\"\"\" # simply call to erlang module and ask for app rendering that", "doesnt exists so validate response if layout == 'no_such_layout': return None else: return", "erlport.erlang import call def render_layout(path): \"\"\"Render a layout from dasherl erlang side.\"\"\" #", "call to erlang module and ask for app rendering that path layout =", "Atom(\"render\"), [path]) # maybe route doesnt exists so validate response if layout ==", "# some errors using the interface with the main app. # imports from", "# maybe route doesnt exists so validate response if layout == 'no_such_layout': return", "call def render_layout(path): \"\"\"Render a layout from dasherl erlang side.\"\"\" # simply call", "route doesnt exists so validate response if layout == 'no_such_layout': return None else:", "def render_layout(path): \"\"\"Render a layout from dasherl erlang side.\"\"\" # simply call to", "code in this py file is separated because there are # some errors", "for app rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route", "call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt exists so validate response if layout", "from erlport.erlterms import Atom from erlport.erlang import call def render_layout(path): \"\"\"Render a layout", "<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- # The code in this", "import call def render_layout(path): \"\"\"Render a layout from dasherl erlang side.\"\"\" # simply", "erlang module and ask for app rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"),", "there are # some errors using the interface with the main app. 
#", "# The code in this py file is separated because there are #", "this py file is separated because there are # some errors using the", "utf-8 -*- # The code in this py file is separated because there", "The code in this py file is separated because there are # some", "because there are # some errors using the interface with the main app.", "# imports from erlport from erlport.erlterms import Atom from erlport.erlang import call def", "Atom from erlport.erlang import call def render_layout(path): \"\"\"Render a layout from dasherl erlang", "# -*- coding: utf-8 -*- # The code in this py file is", "from erlport from erlport.erlterms import Atom from erlport.erlang import call def render_layout(path): \"\"\"Render", "\"\"\"Render a layout from dasherl erlang side.\"\"\" # simply call to erlang module", "ask for app rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe", "app rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt", "path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt exists so validate", "layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt exists so validate response", "coding: utf-8 -*- # The code in this py file is separated because", "is separated because there are # some errors using the interface with the", "simply call to erlang module and ask for app rendering that path layout", "the main app. # imports from erlport from erlport.erlterms import Atom from erlport.erlang", "app. # imports from erlport from erlport.erlterms import Atom from erlport.erlang import call", "interface with the main app. # imports from erlport from erlport.erlterms import Atom", "erlport from erlport.erlterms import Atom from erlport.erlang import call def render_layout(path): \"\"\"Render a", "using the interface with the main app. 
# imports from erlport from erlport.erlterms", "dasherl erlang side.\"\"\" # simply call to erlang module and ask for app", "to erlang module and ask for app rendering that path layout = call(Atom(\"dasherl_router\"),", "module and ask for app rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path])", "render_layout(path): \"\"\"Render a layout from dasherl erlang side.\"\"\" # simply call to erlang", "# simply call to erlang module and ask for app rendering that path", "-*- coding: utf-8 -*- # The code in this py file is separated", "maybe route doesnt exists so validate response if layout == 'no_such_layout': return None", "in this py file is separated because there are # some errors using", "and ask for app rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) #", "from dasherl erlang side.\"\"\" # simply call to erlang module and ask for", "are # some errors using the interface with the main app. # imports", "from erlport.erlang import call def render_layout(path): \"\"\"Render a layout from dasherl erlang side.\"\"\"", "the interface with the main app. # imports from erlport from erlport.erlterms import", "exists so validate response if layout == 'no_such_layout': return None else: return layout", "separated because there are # some errors using the interface with the main", "erlport.erlterms import Atom from erlport.erlang import call def render_layout(path): \"\"\"Render a layout from", "with the main app. # imports from erlport from erlport.erlterms import Atom from", "erlang side.\"\"\" # simply call to erlang module and ask for app rendering", "a layout from dasherl erlang side.\"\"\" # simply call to erlang module and", "= call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt exists so validate response if", "errors using the interface with the main app. 
# imports from erlport from", "py file is separated because there are # some errors using the interface", "rendering that path layout = call(Atom(\"dasherl_router\"), Atom(\"render\"), [path]) # maybe route doesnt exists", "main app. # imports from erlport from erlport.erlterms import Atom from erlport.erlang import", "[path]) # maybe route doesnt exists so validate response if layout == 'no_such_layout':", "imports from erlport from erlport.erlterms import Atom from erlport.erlang import call def render_layout(path):", "python # -*- coding: utf-8 -*- # The code in this py file", "file is separated because there are # some errors using the interface with", "some errors using the interface with the main app. # imports from erlport", "-*- # The code in this py file is separated because there are", "#!/usr/bin/env python # -*- coding: utf-8 -*- # The code in this py", "import Atom from erlport.erlang import call def render_layout(path): \"\"\"Render a layout from dasherl" ]
[ "Real-numbers. ## Output Description Find the largest set of triangles whose average surface", "results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set must", "respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set must match the", "target vector is defined as minimizing for the smallest angle between the two", "in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last", "smallest angle between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each", "that you are trying to align-with: it is also represented as three space-delimited", "Your goal is to find the maximum set of non-intersecting triangles that can", "be line N+1, is the target vector that you are trying to align-with:", "Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line, which", "*Original author: /u/nint22. This challenge is a little more math-heavy than usual, but", "possible. \"Closeness\" between the average surface normal and target vector is defined as", "as minimizing for the smallest angle between the two (as computed through the", "lines, each being a 3D point in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number)", "0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ## Sample Output **The author", "be defined with three points in 3D space: one for each corner. 
One", "three-dimensional triangle can be defined with three points in 3D space: one for", "Search A three-dimensional triangle can be defined with three points in 3D space:", "that can be constructed with these N points (points may be shared between", "set's average surface normal is as close to the given vector's direction as", "is also represented as three space-delimited Real-numbers. ## Output Description Find the largest", "with these N points (points may be shared between triangles) such that this", "indices used. If no set is found, print \"No valid result found\". #", "& Outputs ## Sample Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931", "such that this set's average surface normal is as close to the given", "[07/12/13] Challenge #126 [Hard] Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle", "than or equal to 3. Your goal is to find the maximum set", "defined with three points in 3D space: one for each corner. One can", "Input Description You will be given an integer N which represents the N-following", "formulas you'll need. Triangle-triangle intersection will be the most tricky part!* # Formal", "Inputs & Outputs ## Input Description You will be given an integer N", "At **minimum**, this set must match the target vector with less than 10", "of the two sides), we don't care about which one you choose: just", "the target vector with less than 10 degrees of difference. *Original author: /u/nint22.", "print \"No valid result found\". # Sample Inputs & Outputs ## Sample Input", "that N is greater than or equal to 3. Your goal is to", "difference. *Original author: /u/nint22. This challenge is a little more math-heavy than usual,", "normals match the target vector direction within at minimum 10 degrees. 
Print the", "per line, where a triangle is defined as the three point indices used.", "following results are \"bad\"/\"faked\", and are only examples of \"valid output format\".** 0", "the N-following lines, each being a 3D point in space. Each line has", "care about which one you choose: just make sure that when printing the", "the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a set", "N points (points may be shared between triangles) such that this set's average", "average surface normal is as close to the given vector's direction as possible.", "data gets a +1 gold medal! The following results are \"bad\"/\"faked\", and are", "when printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**,", "can be defined with three points in 3D space: one for each corner.", "a +1 gold medal! The following results are \"bad\"/\"faked\", and are only examples", "is the target vector that you are trying to align-with: it is also", "set of triangles whose average surface normals match the target vector direction within", "0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ## Sample Output", "Wikipedia has all the formulas you'll need. Triangle-triangle intersection will be the most", "10 degrees of difference. *Original author: /u/nint22. This challenge is a little more", "in 3D space: one for each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of", "be given a set of N points, such that N is greater than", "points, such that N is greater than or equal to 3. Your goal", "Challenge #126 [Hard] Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search", "less than 10 degrees of difference. *Original author: /u/nint22. 
This challenge is a", "Print the result as one triangle per line, where a triangle is defined", "\"bad\"/\"faked\", and are only examples of \"valid output format\".** 0 1 2 1", "whose average surface normals match the target vector direction within at minimum 10", "to align-with: it is also represented as three space-delimited Real-numbers. ## Output Description", "average surface normals match the target vector direction within at minimum 10 degrees.", "the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set", "to post good demo data gets a +1 gold medal! The following results", "shared between triangles) such that this set's average surface normal is as close", "or equal to 3. Your goal is to find the maximum set of", "a solution to generate some results with; first person to post good demo", "Outputs ## Input Description You will be given an integer N which represents", "align-with: it is also represented as three space-delimited Real-numbers. ## Output Description Find", "average surface normal and target vector is defined as minimizing for the smallest", "[surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product).", "degrees of difference. *Original author: /u/nint22. This challenge is a little more math-heavy", "be the most tricky part!* # Formal Inputs & Outputs ## Input Description", "with three points in 3D space: one for each corner. One can compute", "is defined as minimizing for the smallest angle between the two (as computed", "gets a +1 gold medal! The following results are \"bad\"/\"faked\", and are only", "medal! 
The following results are \"bad\"/\"faked\", and are only examples of \"valid output", "will be line N+1, is the target vector that you are trying to", "normals (one for each of the two sides), we don't care about which", "math-heavy than usual, but don't worry: the math isn't hard, and Wikipedia has", "rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set must match the target vector with", "we don't care about which one you choose: just make sure that when", "the target vector that you are trying to align-with: it is also represented", "2 1 4 2 \"\"\" def main(): pass if __name__ == \"__main__\": main()", "points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a set of N", "[cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a set of N points, such that N", "through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has two surface normals (one for", "some results with; first person to post good demo data gets a +1", "consistency. At **minimum**, this set must match the target vector with less than", "N points, such that N is greater than or equal to 3. Your", "/u/nint22. This challenge is a little more math-heavy than usual, but don't worry:", "sides), we don't care about which one you choose: just make sure that", "are \"bad\"/\"faked\", and are only examples of \"valid output format\".** 0 1 2", "using the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a", "for each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using", "is greater than or equal to 3. Your goal is to find the", "3D point in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited.", "as three space-delimited Real-numbers. 
## Output Description Find the largest set of triangles", "line N+1, is the target vector that you are trying to align-with: it", "the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the three points to compute the", "close to the given vector's direction as possible. \"Closeness\" between the average surface", "# Sample Inputs & Outputs ## Sample Input 5 0.6652 -0.1405 0.7143 0.2223", "you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set must match", "surface normals (one for each of the two sides), we don't care about", "-delimited. The last line, which will be line N+1, is the target vector", "person to post good demo data gets a +1 gold medal! The following", "each triangle has two surface normals (one for each of the two sides),", "you are trying to align-with: it is also represented as three space-delimited Real-numbers.", "find the maximum set of non-intersecting triangles that can be constructed with these", "& Outputs ## Input Description You will be given an integer N which", "is found, print \"No valid result found\". # Sample Inputs & Outputs ##", "each being a 3D point in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that", "0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577", "the result as one triangle per line, where a triangle is defined as", "set of non-intersecting triangles that can be constructed with these N points (points", "surface normals match the target vector direction within at minimum 10 degrees. Print", "vector's direction as possible. \"Closeness\" between the average surface normal and target vector", "also represented as three space-delimited Real-numbers. ## Output Description Find the largest set", "defined as the three point indices used. 
If no set is found, print", "[right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set must match the target vector", "triangles whose average surface normals match the target vector direction within at minimum", "to find the maximum set of non-intersecting triangles that can be constructed with", "N-following lines, each being a 3D point in space. Each line has three", "You will be given a set of N points, such that N is", "points in 3D space: one for each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\))", "set must match the target vector with less than 10 degrees of difference.", "The following results are \"bad\"/\"faked\", and are only examples of \"valid output format\".**", "largest set of triangles whose average surface normals match the target vector direction", "0.577 ## Sample Output **The author is still working on a solution to", "part!* # Formal Inputs & Outputs ## Input Description You will be given", "triangles) such that this set's average surface normal is as close to the", "0.3548 0.577 -0.577 0.577 ## Sample Output **The author is still working on", "but don't worry: the math isn't hard, and Wikipedia has all the formulas", "0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100", "this set's average surface normal is as close to the given vector's direction", "three space-delimited Real-numbers. ## Output Description Find the largest set of triangles whose", "non-intersecting triangles that can be constructed with these N points (points may be", "Output **The author is still working on a solution to generate some results", "3. Your goal is to find the maximum set of non-intersecting triangles that", "three point indices used. 
If no set is found, print \"No valid result", "Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle", "A three-dimensional triangle can be defined with three points in 3D space: one", "N is greater than or equal to 3. Your goal is to find", "demo data gets a +1 gold medal! The following results are \"bad\"/\"faked\", and", "be shared between triangles) such that this set's average surface normal is as", "line, where a triangle is defined as the three point indices used. If", "that when printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At", "Inputs & Outputs ## Sample Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125", "triangle has two surface normals (one for each of the two sides), we", "Triangle Search A three-dimensional triangle can be defined with three points in 3D", "points (points may be shared between triangles) such that this set's average surface", "0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577", "0.577 -0.577 0.577 ## Sample Output **The author is still working on a", "triangles that can be constructed with these N points (points may be shared", "space -delimited. The last line, which will be line N+1, is the target", "three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line, which will be line", "Description Find the largest set of triangles whose average surface normals match the", "). Though each triangle has two surface normals (one for each of the", "it is also represented as three space-delimited Real-numbers. 
## Output Description Find the", "maximum set of non-intersecting triangles that can be constructed with these N points", "output format\".** 0 1 2 1 4 2 \"\"\" def main(): pass if", "between triangles) such that this set's average surface normal is as close to", "has two surface normals (one for each of the two sides), we don't", "just make sure that when printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule)", "## Sample Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669", "be given an integer N which represents the N-following lines, each being a", "the three point indices used. If no set is found, print \"No valid", "one you choose: just make sure that when printing the results, you respect", "target vector that you are trying to align-with: it is also represented as", "and are only examples of \"valid output format\".** 0 1 2 1 4", "**minimum**, this set must match the target vector with less than 10 degrees", "vector is defined as minimizing for the smallest angle between the two (as", "the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has two", "Formal Inputs & Outputs ## Input Description You will be given an integer", "challenge is a little more math-heavy than usual, but don't worry: the math", "the most tricky part!* # Formal Inputs & Outputs ## Input Description You", "goal is to find the maximum set of non-intersecting triangles that can be", "(one for each of the two sides), we don't care about which one", "the target vector direction within at minimum 10 degrees. 
Print the result as", "are only examples of \"valid output format\".** 0 1 2 1 4 2", "can be constructed with these N points (points may be shared between triangles)", "choose: just make sure that when printing the results, you respect the [right-hand", "a little more math-heavy than usual, but don't worry: the math isn't hard,", "are trying to align-with: it is also represented as three space-delimited Real-numbers. ##", "vector with less than 10 degrees of difference. *Original author: /u/nint22. This challenge", "used. If no set is found, print \"No valid result found\". # Sample", "Outputs ## Sample Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613", "may be shared between triangles) such that this set's average surface normal is", "most tricky part!* # Formal Inputs & Outputs ## Input Description You will", "which represents the N-following lines, each being a 3D point in space. Each", "The last line, which will be line N+1, is the target vector that", "format\".** 0 1 2 1 4 2 \"\"\" def main(): pass if __name__", "will be given an integer N which represents the N-following lines, each being", "worry: the math isn't hard, and Wikipedia has all the formulas you'll need.", "will be the most tricky part!* # Formal Inputs & Outputs ## Input", "represented as three space-delimited Real-numbers. ## Output Description Find the largest set of", "minimum 10 degrees. Print the result as one triangle per line, where a", "result found\". # Sample Inputs & Outputs ## Sample Input 5 0.6652 -0.1405", "0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ## Sample Output **The", "## Sample Output **The author is still working on a solution to generate", "is defined as the three point indices used. If no set is found,", "is a little more math-heavy than usual, but don't worry: the math isn't", "the given vector's direction as possible. \"Closeness\" between the average surface normal and", "has all the formulas you'll need. 
Triangle-triangle intersection will be the most tricky", "don't care about which one you choose: just make sure that when printing", "vector direction within at minimum 10 degrees. Print the result as one triangle", "target vector direction within at minimum 10 degrees. Print the result as one", "valid result found\". # Sample Inputs & Outputs ## Sample Input 5 0.6652", "triangle per line, where a triangle is defined as the three point indices", "as the three point indices used. If no set is found, print \"No", "intersection will be the most tricky part!* # Formal Inputs & Outputs ##", "-0.3525 0.3548 0.577 -0.577 0.577 ## Sample Output **The author is still working", "two sides), we don't care about which one you choose: just make sure", "N+1, is the target vector that you are trying to align-with: it is", "You will be given an integer N which represents the N-following lines, each", "compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a set of N points, such", "tricky part!* # Formal Inputs & Outputs ## Input Description You will be", "## Output Description Find the largest set of triangles whose average surface normals", "don't worry: the math isn't hard, and Wikipedia has all the formulas you'll", "Though each triangle has two surface normals (one for each of the two", "for each of the two sides), we don't care about which one you", "make sure that when printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for", "generate some results with; first person to post good demo data gets a", "minimizing for the smallest angle between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product)", "greater than or equal to 3. Your goal is to find the maximum", "solution to generate some results with; first person to post good demo data", "to 3. Your goal is to find the maximum set of non-intersecting triangles", "point indices used. 
If no set is found, print \"No valid result found\".", "-0.577 0.577 ## Sample Output **The author is still working on a solution", "the math isn't hard, and Wikipedia has all the formulas you'll need. Triangle-triangle", "10 degrees. Print the result as one triangle per line, where a triangle", "and target vector is defined as minimizing for the smallest angle between the", "## Input Description You will be given an integer N which represents the", "0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ## Sample", "Not-So-Normal Triangle Search A three-dimensional triangle can be defined with three points in", "to generate some results with; first person to post good demo data gets", "surface normal and target vector is defined as minimizing for the smallest angle", "working on a solution to generate some results with; first person to post", "to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a set of N points,", "is still working on a solution to generate some results with; first person", "are space -delimited. The last line, which will be line N+1, is the", "line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line, which will", "3D space: one for each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this", "match the target vector with less than 10 degrees of difference. *Original author:", "# [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle can be defined with", "need. Triangle-triangle intersection will be the most tricky part!* # Formal Inputs &", "angle between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle", "good demo data gets a +1 gold medal! The following results are \"bad\"/\"faked\",", "three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). 
You will be given a set of", "one triangle per line, where a triangle is defined as the three point", "results are \"bad\"/\"faked\", and are only examples of \"valid output format\".** 0 1", "#126 [Hard] Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A", "for the smallest angle between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ).", "Sample Inputs & Outputs ## Sample Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001", "an integer N which represents the N-following lines, each being a 3D point", "that this set's average surface normal is as close to the given vector's", "than 10 degrees of difference. *Original author: /u/nint22. This challenge is a little", "direction as possible. \"Closeness\" between the average surface normal and target vector is", "If no set is found, print \"No valid result found\". # Sample Inputs", "0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548", "about which one you choose: just make sure that when printing the results,", "first person to post good demo data gets a +1 gold medal! The", "which one you choose: just make sure that when printing the results, you", "each of the two sides), we don't care about which one you choose:", "normal and target vector is defined as minimizing for the smallest angle between", "the two sides), we don't care about which one you choose: just make", "given vector's direction as possible. \"Closeness\" between the average surface normal and target", "-0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ## Sample Output **The author is", "equal to 3. Your goal is to find the maximum set of non-intersecting", "gold medal! The following results are \"bad\"/\"faked\", and are only examples of \"valid", "the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). 
Though each triangle has two surface normals (one for each", "post good demo data gets a +1 gold medal! The following results are", "between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has", "Triangle-triangle intersection will be the most tricky part!* # Formal Inputs & Outputs", "you'll need. Triangle-triangle intersection will be the most tricky part!* # Formal Inputs", "where a triangle is defined as the three point indices used. If no", "the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be given a set of N points, such that", "[dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has two surface normals (one for each of", "which will be line N+1, is the target vector that you are trying", "found, print \"No valid result found\". # Sample Inputs & Outputs ## Sample", "and Wikipedia has all the formulas you'll need. Triangle-triangle intersection will be the", "the maximum set of non-intersecting triangles that can be constructed with these N", "will be given a set of N points, such that N is greater", "of difference. *Original author: /u/nint22. This challenge is a little more math-heavy than", "a 3D point in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space", "corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the three", "the smallest angle between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though", "vector that you are trying to align-with: it is also represented as three", "0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577", "for consistency. At **minimum**, this set must match the target vector with less", "of triangles whose average surface normals match the target vector direction within at", "that are space -delimited. 
The last line, which will be line N+1, is", "results with; first person to post good demo data gets a +1 gold", "three points in 3D space: one for each corner. One can compute the", "has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line, which will be", "computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has two surface normals (one", "hard, and Wikipedia has all the formulas you'll need. Triangle-triangle intersection will be", "\"No valid result found\". # Sample Inputs & Outputs ## Sample Input 5", "with; first person to post good demo data gets a +1 gold medal!", "given an integer N which represents the N-following lines, each being a 3D", "to the given vector's direction as possible. \"Closeness\" between the average surface normal", "triangle can be defined with three points in 3D space: one for each", "represents the N-following lines, each being a 3D point in space. Each line", "math isn't hard, and Wikipedia has all the formulas you'll need. Triangle-triangle intersection", "as one triangle per line, where a triangle is defined as the three", "space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line,", "degrees. Print the result as one triangle per line, where a triangle is", "the average surface normal and target vector is defined as minimizing for the", "two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has two surface", "more math-heavy than usual, but don't worry: the math isn't hard, and Wikipedia", "given a set of N points, such that N is greater than or", "than usual, but don't worry: the math isn't hard, and Wikipedia has all", "match the target vector direction within at minimum 10 degrees. Print the result", "found\". 
# Sample Inputs & Outputs ## Sample Input 5 0.6652 -0.1405 0.7143", "(as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each triangle has two surface normals", "[Hard] Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional", "author: /u/nint22. This challenge is a little more math-heavy than usual, but don't", "set of N points, such that N is greater than or equal to", "Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle can be", "Sample Output **The author is still working on a solution to generate some", "isn't hard, and Wikipedia has all the formulas you'll need. Triangle-triangle intersection will", "on a solution to generate some results with; first person to post good", "of non-intersecting triangles that can be constructed with these N points (points may", "https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle can be defined", "<reponame>DayGitH/Python-Challenges \"\"\" [07/12/13] Challenge #126 [Hard] Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*:", "as possible. 
\"Closeness\" between the average surface normal and target vector is defined", "\"\"\" [07/12/13] Challenge #126 [Hard] Not-So-Normal Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal", "two surface normals (one for each of the two sides), we don't care", "One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the three points", "\"valid output format\".** 0 1 2 1 4 2 \"\"\" def main(): pass", "constructed with these N points (points may be shared between triangles) such that", "you choose: just make sure that when printing the results, you respect the", "one for each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by", "little more math-heavy than usual, but don't worry: the math isn't hard, and", "no set is found, print \"No valid result found\". # Sample Inputs &", "be constructed with these N points (points may be shared between triangles) such", "can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the three points to", "surface normal is as close to the given vector's direction as possible. \"Closeness\"", "space: one for each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle", "compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the three points to compute", "each corner. One can compute the [surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\\)) of this triangle by using the", "integer N which represents the N-following lines, each being a 3D point in", "as close to the given vector's direction as possible. 
\"Closeness\" between the average", "defined as minimizing for the smallest angle between the two (as computed through", "all the formulas you'll need. Triangle-triangle intersection will be the most tricky part!*", "at minimum 10 degrees. Print the result as one triangle per line, where", "1 2 1 4 2 \"\"\" def main(): pass if __name__ == \"__main__\":", "between the average surface normal and target vector is defined as minimizing for", "triangle by using the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will be", "+1 gold medal! The following results are \"bad\"/\"faked\", and are only examples of", "a set of N points, such that N is greater than or equal", "the largest set of triangles whose average surface normals match the target vector", "examples of \"valid output format\".** 0 1 2 1 4 2 \"\"\" def", "trying to align-with: it is also represented as three space-delimited Real-numbers. ## Output", "normal is as close to the given vector's direction as possible. \"Closeness\" between", "direction within at minimum 10 degrees. Print the result as one triangle per", "usual, but don't worry: the math isn't hard, and Wikipedia has all the", "(points may be shared between triangles) such that this set's average surface normal", "set is found, print \"No valid result found\". 
# Sample Inputs & Outputs", "5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931", "sure that when printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency.", "This challenge is a little more math-heavy than usual, but don't worry: the", "result as one triangle per line, where a triangle is defined as the", "of \"valid output format\".** 0 1 2 1 4 2 \"\"\" def main():", "of N points, such that N is greater than or equal to 3.", "**The author is still working on a solution to generate some results with;", "triangle is defined as the three point indices used. If no set is", "N which represents the N-following lines, each being a 3D point in space.", "Sample Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665", "the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this set must match the target", "printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) for consistency. At **minimum**, this", "with less than 10 degrees of difference. *Original author: /u/nint22. This challenge is", "Description You will be given an integer N which represents the N-following lines,", "-0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ##", "Triangle Search https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/ # [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle can", "of this triangle by using the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You", "\"Closeness\" between the average surface normal and target vector is defined as minimizing", "the formulas you'll need. 
Triangle-triangle intersection will be the most tricky part!* #", "[](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle can be defined with three", "being a 3D point in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are", "Input 5 0.6652 -0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426", "Find the largest set of triangles whose average surface normals match the target", "this set must match the target vector with less than 10 degrees of", "0 1 2 1 4 2 \"\"\" def main(): pass if __name__ ==", "author is still working on a solution to generate some results with; first", "a triangle is defined as the three point indices used. If no set", "must match the target vector with less than 10 degrees of difference. *Original", "[Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line, which will be line N+1,", "line, which will be line N+1, is the target vector that you are", "such that N is greater than or equal to 3. Your goal is", "is to find the maximum set of non-intersecting triangles that can be constructed", "Output Description Find the largest set of triangles whose average surface normals match", "only examples of \"valid output format\".** 0 1 2 1 4 2 \"\"\"", "within at minimum 10 degrees. Print the result as one triangle per line,", "space-delimited Real-numbers. ## Output Description Find the largest set of triangles whose average", "-0.1100 -0.3525 0.3548 0.577 -0.577 0.577 ## Sample Output **The author is still", "# Formal Inputs & Outputs ## Input Description You will be given an", "these N points (points may be shared between triangles) such that this set's", "is as close to the given vector's direction as possible. \"Closeness\" between the", "by using the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). 
You will be given", "this triangle by using the three points to compute the [cross-product](http://en.wikipedia.org/wiki/Cross_product). You will", "last line, which will be line N+1, is the target vector that you", "point in space. Each line has three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The", "*(Hard)*: Not-So-Normal Triangle Search A three-dimensional triangle can be defined with three points", "-0.1405 0.7143 0.2223 0.3001 0.7125 -0.9931 0.9613 0.0669 0.0665 0.6426 -0.4931 -0.1100 -0.3525", "still working on a solution to generate some results with; first person to", "target vector with less than 10 degrees of difference. *Original author: /u/nint22. This" ]
[ "wordDict): # write your code here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s,", "wordDict: A set of words. @return: All possible sentences. \"\"\" def wordBreak(self, s,", "len(word) == len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in", "@param: wordDict: A set of words. @return: All possible sentences. \"\"\" def wordBreak(self,", "words. @return: All possible sentences. \"\"\" def wordBreak(self, s, wordDict): # write your", "s, wordDict): # write your code here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self,", "A string @param: wordDict: A set of words. @return: All possible sentences. \"\"\"", "s, wordDict, memo): if s in memo: return memo[s] if not s: return", "in resultOfTheRest: item = word + ' ' + item res.append(item) memo[s] =", "= [] for word in wordDict: if not word or not s.startswith(word): continue", "s in memo: return memo[s] if not s: return [] res = []", "if not s: return [] res = [] for word in wordDict: if", "wordDict, memo): if s in memo: return memo[s] if not s: return []", "not s.startswith(word): continue if len(word) == len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict,", "here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo): if s in", "memo[s] if not s: return [] res = [] for word in wordDict:", "for item in resultOfTheRest: item = word + ' ' + item res.append(item)", "your code here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo): if", "string @param: wordDict: A set of words. @return: All possible sentences. 
\"\"\" def", "or not s.startswith(word): continue if len(word) == len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):],", "resultOfTheRest: item = word + ' ' + item res.append(item) memo[s] = res", "memo) for item in resultOfTheRest: item = word + ' ' + item", "continue if len(word) == len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for", "# write your code here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict,", "sentences. \"\"\" def wordBreak(self, s, wordDict): # write your code here return self._recursiveWordBreak(s,", "= word + ' ' + item res.append(item) memo[s] = res return res", "set of words. @return: All possible sentences. \"\"\" def wordBreak(self, s, wordDict): #", "= self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest: item = word + '", "memo: return memo[s] if not s: return [] res = [] for word", "if len(word) == len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item", "not word or not s.startswith(word): continue if len(word) == len(s): res.append(word) else: resultOfTheRest", "item in resultOfTheRest: item = word + ' ' + item res.append(item) memo[s]", "if not word or not s.startswith(word): continue if len(word) == len(s): res.append(word) else:", "return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo): if s in memo:", "== len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest:", "word or not s.startswith(word): continue if len(word) == len(s): res.append(word) else: resultOfTheRest =", "s: return [] res = [] for word in wordDict: if not word", "return [] res = [] for word in wordDict: if not word or", "self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo): if s in memo: 
return", "of words. @return: All possible sentences. \"\"\" def wordBreak(self, s, wordDict): # write", "s: A string @param: wordDict: A set of words. @return: All possible sentences.", "len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest: item", "res = [] for word in wordDict: if not word or not s.startswith(word):", "class Solution: \"\"\" @param: s: A string @param: wordDict: A set of words.", "Solution: \"\"\" @param: s: A string @param: wordDict: A set of words. @return:", "return memo[s] if not s: return [] res = [] for word in", "in wordDict: if not word or not s.startswith(word): continue if len(word) == len(s):", "res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest: item =", "code here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo): if s", "\"\"\" def wordBreak(self, s, wordDict): # write your code here return self._recursiveWordBreak(s, wordDict,", "wordBreak(self, s, wordDict): # write your code here return self._recursiveWordBreak(s, wordDict, {}) def", "@return: All possible sentences. \"\"\" def wordBreak(self, s, wordDict): # write your code", "possible sentences. \"\"\" def wordBreak(self, s, wordDict): # write your code here return", "[] for word in wordDict: if not word or not s.startswith(word): continue if", "if s in memo: return memo[s] if not s: return [] res =", "_recursiveWordBreak(self, s, wordDict, memo): if s in memo: return memo[s] if not s:", "item = word + ' ' + item res.append(item) memo[s] = res return", "word in wordDict: if not word or not s.startswith(word): continue if len(word) ==", "A set of words. @return: All possible sentences. \"\"\" def wordBreak(self, s, wordDict):", "def _recursiveWordBreak(self, s, wordDict, memo): if s in memo: return memo[s] if not", "All possible sentences. 
\"\"\" def wordBreak(self, s, wordDict): # write your code here", "@param: s: A string @param: wordDict: A set of words. @return: All possible", "memo): if s in memo: return memo[s] if not s: return [] res", "wordDict: if not word or not s.startswith(word): continue if len(word) == len(s): res.append(word)", "write your code here return self._recursiveWordBreak(s, wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo):", "\"\"\" @param: s: A string @param: wordDict: A set of words. @return: All", "not s: return [] res = [] for word in wordDict: if not", "resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest: item = word +", "in memo: return memo[s] if not s: return [] res = [] for", "wordDict, memo) for item in resultOfTheRest: item = word + ' ' +", "{}) def _recursiveWordBreak(self, s, wordDict, memo): if s in memo: return memo[s] if", "for word in wordDict: if not word or not s.startswith(word): continue if len(word)", "def wordBreak(self, s, wordDict): # write your code here return self._recursiveWordBreak(s, wordDict, {})", "[] res = [] for word in wordDict: if not word or not", "s.startswith(word): continue if len(word) == len(s): res.append(word) else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo)", "self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest: item = word + ' '", "else: resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo) for item in resultOfTheRest: item = word", "wordDict, {}) def _recursiveWordBreak(self, s, wordDict, memo): if s in memo: return memo[s]" ]
[ "assert success, error assert len(sub_dict) == 2 # 2 new subscriptions success, error,", "== 2 def test_update_subscriptions(): \"\"\" tests ingress._update_subscriptions routine \"\"\" # wrong dataframe success,", "pd.read_csv( os.path.join(CONST_TEST_DIR_DATA, CONST_TEST2_FILENAME) ) # getting the courses success, error, course_dict = _update_courses(ENGINE,", "wrong_df) assert success is False, error # good data success, error, sub_dict =", "False, error # empty dataframe success, error, _, _, sub_new_list, sub_update_list = update_edu_data(", "not a dataframe ( success, error, _, _, sub_new_list, sub_update_list ) = update_edu_data(ENGINE,", "# getting the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success, error", "the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df_local) assert success, error assert len(sub_dict)", "= _update_courses(ENGINE, eduhub_df1) assert success, error assert len(course_dict) == 2 # getting the", "_update_subscriptions(ENGINE, eduhub_df1) assert success, error assert len(sub_dict) == 2 # 2 new subscriptions", "import pandas as pd from sqlalchemy import create_engine from edunotice.ingress import ( _update_courses,", "2 success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict) assert success, error assert len(lab_dict)", "= _update_subscriptions(ENGINE, eduhub_df1) assert success, error assert len(sub_dict) == 2 def test_update_details_1(): \"\"\"", "success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df_local) assert success, error assert len(sub_dict) == 3", "import ( _update_courses, _update_labs, _update_subscriptions, _update_details, update_edu_data, ) from edunotice.constants import ( CONST_TEST_DIR_DATA,", "# getting the courses success, error, course_dict = _update_courses(ENGINE, eduhub_df_local) assert success, error", "sub_new_list, sub_update_list ) = update_edu_data(ENGINE, None) assert success is False, error # 
empty", "error, _, _, sub_new_list, sub_update_list ) = update_edu_data(ENGINE, None) assert success is False,", "len(lab_dict) == 2 # getting the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1)", "assert len(sub_dict) == 3 success, error, new_list, update_list = _update_details( ENGINE, eduhub_df_local, lab_dict,", "error assert len(sub_dict) == 2 def test_update_details_1(): \"\"\" tests ingress._update_details routine 2 new", "2 new subscriptions \"\"\" # getting the courses success, error, course_dict = _update_courses(ENGINE,", "_, _, sub_new_list, sub_update_list ) = update_edu_data(ENGINE, None) assert success is False, error", "sub_update_list ) = update_edu_data(ENGINE, None) assert success is False, error # empty dataframe", "success, error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success, error assert len(course_dict) == 2", "31, 2, 3], }, index=[\"Cochice\", \"Pima\", \"<NAME>\", \"Maricopa\", \"Yuma\"], ) # good data", "_, sub_new_list, sub_update_list = update_edu_data( ENGINE, eduhub_df1 ) assert success, error assert len(sub_new_list)", "edunotice.ingress import ( _update_courses, _update_labs, _update_subscriptions, _update_details, update_edu_data, ) from edunotice.constants import (", "error, course_dict = _update_courses(ENGINE, eduhub_df_local) assert success, error assert len(course_dict) == 2 #", "assert len(lab_dict) == 2 # getting the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE,", "assert success is False, error # good data success, error, course_dict = _update_courses(ENGINE,", "def test_update_details_1(): \"\"\" tests ingress._update_details routine 2 new subscriptions \"\"\" # getting the", "course_dict) assert success, error assert len(lab_dict) == 2 success, error, lab_dict = _update_labs(ENGINE,", "success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict) assert success, error assert len(lab_dict) ==", "course_dict = _update_courses(ENGINE, 
eduhub_df1) assert success, error assert len(course_dict) == 2 def test_update_labs():", "error # good data success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict) assert success,", "import os import pandas as pd from sqlalchemy import create_engine from edunotice.ingress import", "create_engine(\"%s/%s\" % (SQL_CONNECTION_STRING, SQL_TEST_DBNAME1)) def test_update_courses(): \"\"\" tests ingress._update_courses routine \"\"\" # wrong", "= _update_labs(ENGINE, eduhub_df1, course_dict) assert success, error assert len(lab_dict) == 2 def test_update_subscriptions():", "dataframe ( success, error, _, _, sub_new_list, sub_update_list ) = update_edu_data(ENGINE, None) assert", "len(lab_dict) == 2 # getting the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df_local)", "ingress.update_edu_data routine \"\"\" # not a dataframe ( success, error, _, _, sub_new_list,", "tests ingress._update_courses routine \"\"\" # wrong dataframe success, error, _ = _update_courses(ENGINE, wrong_df)", "_, _, sub_new_list, sub_update_list = update_edu_data( ENGINE, pd.DataFrame() ) assert success is False,", "len(sub_dict) == 3 success, error, new_list, update_list = _update_details( ENGINE, eduhub_df_local, lab_dict, sub_dict", "\"\"\" # getting the courses success, error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success,", "\"\"\" tests ingress._update_labs routine \"\"\" # getting the courses success, error, course_dict =", "error assert len(course_dict) == 2 # getting the labs success, error, lab_dict =", "error assert len(lab_dict) == 2 def test_update_subscriptions(): \"\"\" tests ingress._update_subscriptions routine \"\"\" #", "eduhub_df1) assert success, error assert len(sub_dict) == 2 def test_update_details_1(): \"\"\" tests ingress._update_details", "success is False, error # real data success, error, _, _, sub_new_list, sub_update_list", "eduhub_df1) assert success, error assert len(course_dict) == 2 # getting the 
labs success,", "\"Yuma\"], ) # good data file_path1 = os.path.join(CONST_TEST_DIR_DATA, CONST_TEST1_FILENAME) eduhub_df1 = pd.read_csv(file_path1) ENGINE", "file_path1 = os.path.join(CONST_TEST_DIR_DATA, CONST_TEST1_FILENAME) eduhub_df1 = pd.read_csv(file_path1) ENGINE = create_engine(\"%s/%s\" % (SQL_CONNECTION_STRING, SQL_TEST_DBNAME1))", "pd.read_csv(file_path1) ENGINE = create_engine(\"%s/%s\" % (SQL_CONNECTION_STRING, SQL_TEST_DBNAME1)) def test_update_courses(): \"\"\" tests ingress._update_courses routine", "routine 1 update \"\"\" eduhub_df_local = pd.read_csv( os.path.join(CONST_TEST_DIR_DATA, CONST_TEST2_FILENAME) ) # getting the", "_update_labs(ENGINE, eduhub_df1, course_dict) assert success, error assert len(lab_dict) == 2 # getting the", "eduhub_df1) assert success, error assert len(course_dict) == 2 def test_update_labs(): \"\"\" tests ingress._update_labs", "wrong dataframe wrong_df = pd.DataFrame( { \"name\": [\"Jason\", \"Molly\", \"Tina\", \"Jake\", \"Amy\"], \"year\":", "# getting the labs success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict) assert success,", "== 2 # getting the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert", "eduhub_df1, course_dict) assert success, error assert len(lab_dict) == 2 # getting the subscriptions", "len(course_dict) == 2 def test_update_labs(): \"\"\" tests ingress._update_labs routine \"\"\" # getting the", "error # good data success, error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success, error", "the courses success, error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success, error assert len(course_dict)", "pandas as pd from sqlalchemy import create_engine from edunotice.ingress import ( _update_courses, _update_labs,", "error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success, error assert len(sub_dict) == 2 #", "# wrong dataframe success, error, _ = _update_courses(ENGINE, wrong_df) assert success is 
# --- shared test fixtures -------------------------------------------------

# A deliberately malformed frame: none of its columns match what the
# ingress routines expect, so it drives every negative-path assertion.
_BAD_COLUMNS = {
    "name": ["Jason", "Molly", "Tina", "Jake", "Amy"],
    "year": [2012, 2012, 2013, 2014, 2014],
    "reports": [4, 24, 31, 2, 3],
}
wrong_df = pd.DataFrame(
    _BAD_COLUMNS,
    index=["Cochice", "Pima", "<NAME>", "Maricopa", "Yuma"],
)

# A well-formed EduHub export used by the happy-path assertions.
file_path1 = os.path.join(CONST_TEST_DIR_DATA, CONST_TEST1_FILENAME)
eduhub_df1 = pd.read_csv(file_path1)

# One engine, shared by all tests, pointing at the dedicated test database.
ENGINE = create_engine("%s/%s" % (SQL_CONNECTION_STRING, SQL_TEST_DBNAME1))
def test_update_courses():
    """Exercise ingress._update_courses.

    Covers: rejection of a malformed frame, first-time ingest of the
    good data, and idempotency of a repeated ingest.
    """
    # A frame without the expected columns must be rejected.
    ok, err, _ = _update_courses(ENGINE, wrong_df)
    assert ok is False, err

    # First ingest of the good export yields both courses.
    ok, err, courses = _update_courses(ENGINE, eduhub_df1)
    assert ok, err
    assert len(courses) == 2

    # Re-running with identical data must not create duplicates.
    ok, err, courses = _update_courses(ENGINE, eduhub_df1)
    assert ok, err
    assert len(courses) == 2
def test_update_labs():
    """Exercise ingress._update_labs.

    Labs require the course lookup produced by _update_courses, so the
    courses are ingested first; then the malformed frame, a fresh
    ingest, and an idempotent re-ingest are checked in turn.
    """
    # Prerequisite: course lookup table.
    ok, err, courses = _update_courses(ENGINE, eduhub_df1)
    assert ok, err
    assert len(courses) == 2

    # Malformed frame must be rejected even with a valid course dict.
    ok, err, _ = _update_labs(ENGINE, wrong_df, courses)
    assert ok is False, err

    # Fresh ingest yields both labs.
    ok, err, labs = _update_labs(ENGINE, eduhub_df1, courses)
    assert ok, err
    assert len(labs) == 2

    # Repeating the ingest must not create duplicates.
    ok, err, labs = _update_labs(ENGINE, eduhub_df1, courses)
    assert ok, err
    assert len(labs) == 2
def test_update_subscriptions():
    """Exercise ingress._update_subscriptions.

    Covers: rejection of a malformed frame, first-time ingest, and
    idempotency of a repeated ingest.
    """
    # A frame without the expected columns must be rejected.
    ok, err, _ = _update_subscriptions(ENGINE, wrong_df)
    assert ok is False, err

    # First ingest of the good export yields both subscriptions.
    ok, err, subs = _update_subscriptions(ENGINE, eduhub_df1)
    assert ok, err
    assert len(subs) == 2

    # Re-running with identical data must not create duplicates.
    ok, err, subs = _update_subscriptions(ENGINE, eduhub_df1)
    assert ok, err
    assert len(subs) == 2
def test_update_details_1():
    """Exercise ingress._update_details with first-time data.

    After ingesting courses, labs and subscriptions from the first
    export, the details pass should report 2 brand-new subscription
    records and no updates.
    """
    # Prerequisite lookups, built in dependency order.
    ok, err, courses = _update_courses(ENGINE, eduhub_df1)
    assert ok, err
    assert len(courses) == 2

    ok, err, labs = _update_labs(ENGINE, eduhub_df1, courses)
    assert ok, err
    assert len(labs) == 2

    ok, err, subs = _update_subscriptions(ENGINE, eduhub_df1)
    assert ok, err
    assert len(subs) == 2

    # Everything is new on the first pass: 2 additions, 0 updates.
    ok, err, added, updated = _update_details(
        ENGINE, eduhub_df1, labs, subs
    )
    assert ok, err
    assert len(added) == 2
    assert len(updated) == 0
def test_update_details_2():
    """Exercise ingress._update_details with a follow-up export.

    The second test file contains one new subscription and changes to
    two existing ones, so the details pass should report 1 addition
    and 2 updates.
    """
    followup_df = pd.read_csv(
        os.path.join(CONST_TEST_DIR_DATA, CONST_TEST2_FILENAME)
    )

    # Prerequisite lookups, built in dependency order.
    ok, err, courses = _update_courses(ENGINE, followup_df)
    assert ok, err
    assert len(courses) == 2

    ok, err, labs = _update_labs(
        ENGINE, followup_df, courses
    )
    assert ok, err
    assert len(labs) == 2

    # The follow-up export carries a third subscription.
    ok, err, subs = _update_subscriptions(ENGINE, followup_df)
    assert ok, err
    assert len(subs) == 3

    # One record is new, two existing records changed.
    ok, err, added, updated = _update_details(
        ENGINE, followup_df, labs, subs
    )
    assert ok, err
    assert len(added) == 1
    assert len(updated) == 2
def test_update_edu_data():
    """Exercise the ingress.update_edu_data orchestrator.

    Covers: rejection of a non-DataFrame input, rejection of an empty
    frame, and a real ingest of data already present in the database
    (expected: no new subscription details, 2 updated ones).
    """
    # Not a DataFrame at all -> failure.
    ok, err, _, _, added, updated = update_edu_data(ENGINE, None)
    assert ok is False, err

    # An empty DataFrame -> failure.
    ok, err, _, _, added, updated = update_edu_data(
        ENGINE, pd.DataFrame()
    )
    assert ok is False, err

    # Real data that earlier tests already ingested: nothing new,
    # but both subscription detail rows register as updates.
    ok, err, _, _, added, updated = update_edu_data(
        ENGINE, eduhub_df1
    )
    assert ok, err
    assert len(added) == 0
    assert len(updated) == 2
{ \"name\": [\"Jason\", \"Molly\", \"Tina\", \"Jake\", \"Amy\"], \"year\": [2012, 2012, 2013,", "error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success, error assert len(course_dict) == 2 success,", "is False, error # good data success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict)", "error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success, error assert len(course_dict) == 2 #", "== 2 # wrong dataframe success, error, _ = _update_labs(ENGINE, wrong_df, course_dict) assert", "course_dict) assert success, error assert len(lab_dict) == 2 def test_update_subscriptions(): \"\"\" tests ingress._update_subscriptions", "as pd from sqlalchemy import create_engine from edunotice.ingress import ( _update_courses, _update_labs, _update_subscriptions,", "ingress._update_subscriptions routine \"\"\" # wrong dataframe success, error, _ = _update_subscriptions(ENGINE, wrong_df) assert", "lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict) assert success, error assert len(lab_dict) == 2 def", "False, error # good data success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success,", "pd from sqlalchemy import create_engine from edunotice.ingress import ( _update_courses, _update_labs, _update_subscriptions, _update_details,", "getting the subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df_local) assert success, error assert", "new subscriptions \"\"\" # getting the courses success, error, course_dict = _update_courses(ENGINE, eduhub_df1)", "sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success, error assert len(sub_dict) == 2 # 2", "tests ingress._update_details routine 2 new subscriptions \"\"\" # getting the courses success, error,", "assert len(sub_dict) == 2 success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success, error", "== 2 def test_update_details_1(): \"\"\" tests ingress._update_details routine 2 new subscriptions \"\"\" #", 
"== 2 assert len(update_list) == 0 def test_update_details_2(): \"\"\" tests ingress._update_details routine 1", "\"Jake\", \"Amy\"], \"year\": [2012, 2012, 2013, 2014, 2014], \"reports\": [4, 24, 31, 2,", "success, error, _ = _update_courses(ENGINE, wrong_df) assert success is False, error # good", "False, error # real data success, error, _, _, sub_new_list, sub_update_list = update_edu_data(", "error # empty dataframe success, error, _, _, sub_new_list, sub_update_list = update_edu_data( ENGINE,", "course_dict) assert success, error assert len(lab_dict) == 2 # getting the subscriptions success,", "2013, 2014, 2014], \"reports\": [4, 24, 31, 2, 3], }, index=[\"Cochice\", \"Pima\", \"<NAME>\",", "error # good data success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success, error", "= pd.read_csv( os.path.join(CONST_TEST_DIR_DATA, CONST_TEST2_FILENAME) ) # getting the courses success, error, course_dict =", "= _update_courses(ENGINE, eduhub_df1) assert success, error assert len(course_dict) == 2 def test_update_labs(): \"\"\"", "\"<NAME>\", \"Maricopa\", \"Yuma\"], ) # good data file_path1 = os.path.join(CONST_TEST_DIR_DATA, CONST_TEST1_FILENAME) eduhub_df1 =", "len(sub_dict) == 2 # 2 new subscriptions success, error, new_list, update_list = _update_details(", "assert len(course_dict) == 2 # getting the labs success, error, lab_dict = _update_labs(ENGINE,", "success, error assert len(sub_dict) == 2 success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert", "success, error, lab_dict = _update_labs( ENGINE, eduhub_df_local, course_dict ) assert success, error assert", "_ = _update_courses(ENGINE, wrong_df) assert success is False, error # good data success,", "from sqlalchemy import create_engine from edunotice.ingress import ( _update_courses, _update_labs, _update_subscriptions, _update_details, update_edu_data,", ") # good data file_path1 = os.path.join(CONST_TEST_DIR_DATA, CONST_TEST1_FILENAME) eduhub_df1 = 
pd.read_csv(file_path1) ENGINE =", "# good data success, error, course_dict = _update_courses(ENGINE, eduhub_df1) assert success, error assert", "2 # wrong dataframe success, error, _ = _update_labs(ENGINE, wrong_df, course_dict) assert success", "test_update_courses(): \"\"\" tests ingress._update_courses routine \"\"\" # wrong dataframe success, error, _ =", "error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict) assert success, error assert len(lab_dict) == 2", "subscriptions success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1) assert success, error assert len(sub_dict) ==" ]
[ "from django.db import transaction from jobya.users.models import User from jobya.users.tests.factories import UserFactory class", "type=int, help=\"Indicates the number of users to be created\", ) @transaction.atomic def handle(self,", "\"Set up users data\" def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the", "total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new", "*args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete()", "from django.core.management.base import BaseCommand from django.db import transaction from jobya.users.models import User from", "help = \"Set up users data\" def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int,", "created\", ) @transaction.atomic def handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\")", "delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all the users people =", "data\" def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number of users", "def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number of users to", "User from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help = \"Set up users data\"", "number of users to be created\", ) @transaction.atomic def handle(self, *args, **options): total", "**options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating", "import transaction from jobya.users.models import User 
from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help", "add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number of users to be", "parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number of users to be created\",", "from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help = \"Set up users data\" def", "to be created\", ) @transaction.atomic def handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting", "def handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete", ") @transaction.atomic def handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") #", "transaction from jobya.users.models import User from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help =", "options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") #", "Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all the users people", "data...\") # Create all the users people = [] for _ in range(total):", "all the users people = [] for _ in range(total): person = UserFactory()", "Command(BaseCommand): help = \"Set up users data\" def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\",", "the number of users to be created\", ) @transaction.atomic def handle(self, *args, **options):", "django.db import transaction from jobya.users.models import User from jobya.users.tests.factories import UserFactory class Command(BaseCommand):", "from jobya.users.models import User from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help = \"Set", 
"BaseCommand from django.db import transaction from jobya.users.models import User from jobya.users.tests.factories import UserFactory", "@transaction.atomic def handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't", "import UserFactory class Command(BaseCommand): help = \"Set up users data\" def add_arguments(self, parser):", "import BaseCommand from django.db import transaction from jobya.users.models import User from jobya.users.tests.factories import", "self.stdout.write(\"Deleting old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create", "data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all the", "import User from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help = \"Set up users", "= \"Set up users data\" def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates", "users data\" def add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number of", "old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all", "users people = [] for _ in range(total): person = UserFactory() people.append(person) self.stdout.write(\"Success\")", "jobya.users.models import User from jobya.users.tests.factories import UserFactory class Command(BaseCommand): help = \"Set up", "nargs=\"+\", type=int, help=\"Indicates the number of users to be created\", ) @transaction.atomic def", "parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number of users to be created\", )", "be created\", ) @transaction.atomic def handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old", "superuser 
User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all the users people = []", "self.stdout.write(\"Creating new data...\") # Create all the users people = [] for _", "users to be created\", ) @transaction.atomic def handle(self, *args, **options): total = options[\"total\"][0]", "django.core.management.base import BaseCommand from django.db import transaction from jobya.users.models import User from jobya.users.tests.factories", "class Command(BaseCommand): help = \"Set up users data\" def add_arguments(self, parser): parser.add_argument( \"total\",", "# Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all the users", "new data...\") # Create all the users people = [] for _ in", "# Create all the users people = [] for _ in range(total): person", "jobya.users.tests.factories import UserFactory class Command(BaseCommand): help = \"Set up users data\" def add_arguments(self,", "Create all the users people = [] for _ in range(total): person =", "\"total\", nargs=\"+\", type=int, help=\"Indicates the number of users to be created\", ) @transaction.atomic", "the users people = [] for _ in range(total): person = UserFactory() people.append(person)", "help=\"Indicates the number of users to be created\", ) @transaction.atomic def handle(self, *args,", "UserFactory class Command(BaseCommand): help = \"Set up users data\" def add_arguments(self, parser): parser.add_argument(", "handle(self, *args, **options): total = options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete superuser", "= options[\"total\"][0] self.stdout.write(\"Deleting old data...\") # Don't delete superuser User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\")", "User.objects.filter(is_superuser=False).delete() self.stdout.write(\"Creating new data...\") # Create all the users people = [] for", "up users data\" def 
add_arguments(self, parser): parser.add_argument( \"total\", nargs=\"+\", type=int, help=\"Indicates the number", "of users to be created\", ) @transaction.atomic def handle(self, *args, **options): total =" ]
[ "# # @lc app=leetcode.cn id=1662 lang=python3 # # [1662] minimum-numbers-of-function-calls-to-make-target-array # None #", "# @lc app=leetcode.cn id=1662 lang=python3 # # [1662] minimum-numbers-of-function-calls-to-make-target-array # None # @lc", "@lc app=leetcode.cn id=1662 lang=python3 # # [1662] minimum-numbers-of-function-calls-to-make-target-array # None # @lc code=end" ]
[ "object :param params: Dictionary with the test parameters :param env: Dictionary with test", "\"(time {:s})\".format(fetch_data_from_zero_device) realtime2 = get_excution_time(session, fetch_data_from_zero_device) logging.info(\"The real execution time of the command", "= params[\"main_vm\"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() timeout = float(params.get(\"login_timeout\",", "out).group(1)) except: exceptions.TestError(\"Unable to read realtime, cmd output: %s\" % out) def run_sg_utils(disk_name,", "exceptions.TestFail(\"after sg_writesame, image hash value becomes different between guest and host \") session.close()", "get_excution_time(session, fetch_data_from_file) logging.info(\"The real execution time of the command is:{:f}\".format(realtime1)) if params.get(\"disk_type\") ==", "return float(re.search(r\"real\\s+\\dm(.*)s\", out).group(1)) except: exceptions.TestError(\"Unable to read realtime, cmd output: %s\" % out)", "float(re.search(r\"real\\s+\\dm(.*)s\", out).group(1)) except: exceptions.TestError(\"Unable to read realtime, cmd output: %s\" % out) def", "--num=32 --lba=80 {1};\" cmd += \"sg_write_same --in /dev/zero --num=96 --lba=0 {1};\" cmd +=", "map --output=json {:s}\".format(disk_name)) logging.debug(\"json map: {}\".format(output)) time.sleep(0.1) fetch_data_from_zero_device = \"sg_write_same --in /dev/zero --num=65534", "{0};\"\"\" cmd += \"sg_write_same --in {0} --num=32 --lba=80 {1};\" cmd += \"sg_write_same --in", "{:s}\".format(disk_name) fetch_data_from_zero_device = \"(time {:s})\".format(fetch_data_from_zero_device) realtime2 = get_excution_time(session, fetch_data_from_zero_device) logging.info(\"The real execution time", "in guest through shell command \"time\". 
:param session: Guest session :param cmd: Commands", "sha1 value of the guest disk 4) In host, check the sha1 value", "240)) session = vm.wait_for_login(timeout=timeout) guest_disk_name = thin_provisioning.get_scsi_disk(session)[1] run_sg_utils(guest_disk_name, session) guest_sha1 = session.cmd_output(\"sha1sum {:s}\".format(guest_disk_name)).split()[0]", "{:s})\".format(fetch_data_from_file) realtime1 = get_excution_time(session, fetch_data_from_file) logging.info(\"The real execution time of the command is:{:f}\".format(realtime1))", "guest_disk_name = thin_provisioning.get_scsi_disk(session)[1] run_sg_utils(guest_disk_name, session) guest_sha1 = session.cmd_output(\"sha1sum {:s}\".format(guest_disk_name)).split()[0] host_sha1 = process.system_output(\"sha1sum {:s}\".format(disk_name)).split()[0]", "sg_utils package. :param disk_name: The Guest disk name :param session: Guest Session :return:", "\"sg_write_same --in /dev/zero --num=96 --lba=0 {1};\" cmd += \"sg_write_same -U --in /dev/zero --num=16", "if re.search(r\"bad field in Write same\", out3) is None: raise exceptions.TestFail(\"sg_write_same command fails.", "Guest Session :return: None \"\"\" yesfile = \"/home/buf\" cmd = \"\"\"yes | head", "guest_sha1 != host_sha1: raise exceptions.TestFail(\"after sg_writesame, image hash value becomes different between guest", ":param session: Guest Session :return: None \"\"\" yesfile = \"/home/buf\" cmd = \"\"\"yes", "{:s}\".format(yesfile, disk_name) fetch_data_from_file = \"(time {:s})\".format(fetch_data_from_file) realtime1 = get_excution_time(session, fetch_data_from_file) logging.info(\"The real execution", "disk_name) session.cmd(cmd) fetch_data_from_file = \"sg_write_same --in {:s} --num=65536 --lba=131074 {:s}\".format(yesfile, disk_name) fetch_data_from_file =", "from virttest import data_dir from virttest import env_process from avocado.utils import process from", "\"\"\" yesfile = \"/home/buf\" cmd = \"\"\"yes | head -n2048 > {0};\"\"\" cmd", "execution 
time of the command is {:f}\".format(realtime2)) out3 = session.cmd_output(\"sg_write_same --in /dev/zero --num=0", "vm = env.get_vm(vm_name) vm.verify_alive() timeout = float(params.get(\"login_timeout\", 240)) session = vm.wait_for_login(timeout=timeout) guest_disk_name =", "realtime1 = get_excution_time(session, fetch_data_from_file) logging.info(\"The real execution time of the command is:{:f}\".format(realtime1)) if", "bitmap is: {}\".format(bitmap)) else: output = process.system_output(\"qemu-img map --output=json {:s}\".format(disk_name)) logging.debug(\"json map: {}\".format(output))", "longger\") thin_provisioning.destroy_vm(env) if params.get(\"disk_type\") == \"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] = disk_name else:", "thin_provisioning.get_scsi_disk(session)[1] run_sg_utils(guest_disk_name, session) guest_sha1 = session.cmd_output(\"sha1sum {:s}\".format(guest_disk_name)).split()[0] host_sha1 = process.system_output(\"sha1sum {:s}\".format(disk_name)).split()[0] if guest_sha1", "import exceptions from autotest.client.shared import error from qemu.tests import thin_provisioning @error.context_aware def run(test,", "session :param cmd: Commands to execute :return: The real execution time \"\"\" out", "thin_provisioning @error.context_aware def run(test, params, env): \"\"\" 'thin-provisioning' functions test using sg_utils: 1)", "= float(params.get(\"login_timeout\", 240)) session = vm.wait_for_login(timeout=timeout) guest_disk_name = thin_provisioning.get_scsi_disk(session)[1] run_sg_utils(guest_disk_name, session) guest_sha1 =", "guest with the scsi disk 2) using sg_utils to do some test 3)", ":param cmd: Commands to execute :return: The real execution time \"\"\" out =", "read realtime, cmd output: %s\" % out) def run_sg_utils(disk_name, session): \"\"\" This function", "/dev/zero --num=65534 --lba=196608 {:s}\".format(disk_name) fetch_data_from_zero_device = \"(time 
{:s})\".format(fetch_data_from_zero_device) realtime2 = get_excution_time(session, fetch_data_from_zero_device) logging.info(\"The", "logging from virttest import data_dir from virttest import env_process from avocado.utils import process", "guest disk 4) In host, check the sha1 value of the disk image", "the disk image :param test: QEMU test object :param params: Dictionary with the", "In host, check the sha1 value of the disk image :param test: QEMU", "--lba=131074 {:s}\".format(yesfile, disk_name) fetch_data_from_file = \"(time {:s})\".format(fetch_data_from_file) realtime1 = get_excution_time(session, fetch_data_from_file) logging.info(\"The real", "== \"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] = disk_name else: disk_name = os.path.join(data_dir.get_data_dir(), params.get(\"image_name_image_test\"))", "import process from avocado.core import exceptions from autotest.client.shared import error from qemu.tests import", "if params.get(\"disk_type\") == \"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] = disk_name else: disk_name =", "is: {}\".format(bitmap)) else: output = process.system_output(\"qemu-img map --output=json {:s}\".format(disk_name)) logging.debug(\"json map: {}\".format(output)) time.sleep(0.1)", "Guest disk name :param session: Guest Session :return: None \"\"\" yesfile = \"/home/buf\"", "'thin-provisioning' functions test using sg_utils: 1) Boot up the guest with the scsi", "\"scsi_debug\": bitmap = thin_provisioning.get_allocation_bitmap() logging.debug(\"Block allocation bitmap is: {}\".format(bitmap)) else: output = process.system_output(\"qemu-img", "output: %s\" % out) def run_sg_utils(disk_name, session): \"\"\" This function is used do", "params.get(\"disk_type\") == \"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] = disk_name else: disk_name = os.path.join(data_dir.get_data_dir(),", "= 
\"sg_write_same --in {:s} --num=65536 --lba=131074 {:s}\".format(yesfile, disk_name) fetch_data_from_file = \"(time {:s})\".format(fetch_data_from_file) realtime1", "import logging from virttest import data_dir from virttest import env_process from avocado.utils import", "process.system_output(\"qemu-img map --output=json {:s}\".format(disk_name)) logging.debug(\"json map: {}\".format(output)) time.sleep(0.1) fetch_data_from_zero_device = \"sg_write_same --in /dev/zero", "data_dir from virttest import env_process from avocado.utils import process from avocado.core import exceptions", "is used to measure the real execution time of the command in guest", "= process.system_output(\"sha1sum {:s}\".format(disk_name)).split()[0] if guest_sha1 != host_sha1: raise exceptions.TestFail(\"after sg_writesame, image hash value", "= \"(time {:s})\".format(fetch_data_from_file) realtime1 = get_excution_time(session, fetch_data_from_file) logging.info(\"The real execution time of the", "is much longger\") thin_provisioning.destroy_vm(env) if params.get(\"disk_type\") == \"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] =", "value of the guest disk 4) In host, check the sha1 value of", "= get_excution_time(session, fetch_data_from_zero_device) logging.info(\"The real execution time of the command is {:f}\".format(realtime2)) out3", "real execution time of the command is:{:f}\".format(realtime1)) if params.get(\"disk_type\") == \"scsi_debug\": bitmap =", "env.get_vm(vm_name) vm.verify_alive() timeout = float(params.get(\"login_timeout\", 240)) session = vm.wait_for_login(timeout=timeout) guest_disk_name = thin_provisioning.get_scsi_disk(session)[1] run_sg_utils(guest_disk_name,", "package. :param disk_name: The Guest disk name :param session: Guest Session :return: None", "raise exceptions.TestFail(\"sg_write_same command fails. 
output is {}\".format(out3)) if realtime2 > realtime1: raise exceptions.TestFail(\"time", "\"\"\" def get_excution_time(session, cmd): \"\"\" This function is used to measure the real", "using sg_utils to do some test 3) In guest, check the sha1 value", "the command is {:f}\".format(realtime2)) out3 = session.cmd_output(\"sg_write_same --in /dev/zero --num=0 --lba=128 {:s}\".format(disk_name)) logging.debug(out3)", "None: raise exceptions.TestFail(\"sg_write_same command fails. output is {}\".format(out3)) if realtime2 > realtime1: raise", "get_excution_time(session, cmd): \"\"\" This function is used to measure the real execution time", "params.get(\"disk_type\") == \"scsi_debug\": bitmap = thin_provisioning.get_allocation_bitmap() logging.debug(\"Block allocation bitmap is: {}\".format(bitmap)) else: output", "3) In guest, check the sha1 value of the guest disk 4) In", "session.cmd(cmd) fetch_data_from_file = \"sg_write_same --in {:s} --num=65536 --lba=131074 {:s}\".format(yesfile, disk_name) fetch_data_from_file = \"(time", "thin_provisioning.destroy_vm(env) if params.get(\"disk_type\") == \"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] = disk_name else: disk_name", "execution time of the command in guest through shell command \"time\". 
:param session:", "error from qemu.tests import thin_provisioning @error.context_aware def run(test, params, env): \"\"\" 'thin-provisioning' functions", "process from avocado.core import exceptions from autotest.client.shared import error from qemu.tests import thin_provisioning", "--lba=196608 {:s}\".format(disk_name) fetch_data_from_zero_device = \"(time {:s})\".format(fetch_data_from_zero_device) realtime2 = get_excution_time(session, fetch_data_from_zero_device) logging.info(\"The real execution", "\"scsi_debug\": disk_name = thin_provisioning.get_scsi_disk()[1] params[\"image_name_image_test\"] = disk_name else: disk_name = os.path.join(data_dir.get_data_dir(), params.get(\"image_name_image_test\")) disk_name", "with the scsi disk 2) using sg_utils to do some test 3) In", "+= \"sg_write_same -U --in /dev/zero --num=16 --lba=0 {1};\" cmd = cmd.format(yesfile, disk_name) session.cmd(cmd)", "params[\"main_vm\"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() timeout = float(params.get(\"login_timeout\", 240))", "guest, check the sha1 value of the guest disk 4) In host, check", "+= \"sg_write_same --in /dev/zero --num=96 --lba=0 {1};\" cmd += \"sg_write_same -U --in /dev/zero", "output = process.system_output(\"qemu-img map --output=json {:s}\".format(disk_name)) logging.debug(\"json map: {}\".format(output)) time.sleep(0.1) fetch_data_from_zero_device = \"sg_write_same", "disk name :param session: Guest Session :return: None \"\"\" yesfile = \"/home/buf\" cmd", "--num=0 --lba=128 {:s}\".format(disk_name)) logging.debug(out3) if re.search(r\"bad field in Write same\", out3) is None:", "run_sg_utils(guest_disk_name, session) guest_sha1 = session.cmd_output(\"sha1sum {:s}\".format(guest_disk_name)).split()[0] host_sha1 = process.system_output(\"sha1sum {:s}\".format(disk_name)).split()[0] if guest_sha1 !=", "import env_process from avocado.utils import process from avocado.core import exceptions from 
import os
import re
import time
import logging

from virttest import data_dir
from virttest import env_process
from avocado.utils import process
from avocado.core import exceptions
from autotest.client.shared import error
from qemu.tests import thin_provisioning


@error.context_aware
def run(test, params, env):
    """
    'thin-provisioning' functions test using sg_utils:
    1) Boot up the guest with the scsi disk
    2) using sg_utils to do some test
    3) In guest, check the sha1 value of the guest disk
    4) In host, check the sha1 value of the disk image

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def get_excution_time(session, cmd):
        """
        Measure the real execution time of the command in the guest
        through the shell command "time".

        :param session: Guest session
        :param cmd: Commands to execute
        :return: The real execution time in seconds, as a float
        :raise exceptions.TestError: if the "real" time cannot be parsed
            from the command output
        """
        out = session.cmd_output(cmd)
        # "time" prints e.g. "real    0m1.234s"; capture the seconds part.
        # Bug fix: the original wrapped this in a bare "except:" that
        # constructed a TestError without raising it, so parse failures
        # silently returned None and broke the later float comparison.
        matched = re.search(r"real\s+\dm(.*)s", out)
        if matched is None:
            raise exceptions.TestError("Unable to read realtime, cmd output: %s" % out)
        return float(matched.group(1))

    def run_sg_utils(disk_name, session):
        """
        Run some tests on the disk using the sg_utils package.

        :param disk_name: The Guest disk name
        :param session: Guest Session
        :return: None
        """
        yesfile = "/home/buf"
        # Prepare a small data file, then exercise several sg_write_same
        # variants: sourced from the file, from /dev/zero, and with the
        # -U (unmap) flag.
        cmd = """yes | head -n2048 > {0};"""
        cmd += "sg_write_same --in {0} --num=32 --lba=80 {1};"
        cmd += "sg_write_same --in /dev/zero --num=96 --lba=0 {1};"
        cmd += "sg_write_same -U --in /dev/zero --num=16 --lba=0 {1};"
        cmd = cmd.format(yesfile, disk_name)
        session.cmd(cmd)

        # Time a large write-same whose payload comes from a regular file.
        fetch_data_from_file = "sg_write_same --in {:s} --num=65536 --lba=131074 {:s}".format(yesfile, disk_name)
        fetch_data_from_file = "(time {:s})".format(fetch_data_from_file)
        realtime1 = get_excution_time(session, fetch_data_from_file)
        logging.info("The real execution time of the command is:{:f}".format(realtime1))

        if params.get("disk_type") == "scsi_debug":
            bitmap = thin_provisioning.get_allocation_bitmap()
            logging.debug("Block allocation bitmap is: {}".format(bitmap))
        else:
            output = process.system_output("qemu-img map --output=json {:s}".format(disk_name))
            logging.debug("json map: {}".format(output))
        # Brief pause before the second timed run.
        time.sleep(0.1)

        # Time a comparable write-same sourced from /dev/zero; on a
        # thin-provisioned disk this is expected not to take longer than
        # the file-sourced run above.
        fetch_data_from_zero_device = "sg_write_same --in /dev/zero --num=65534 --lba=196608 {:s}".format(disk_name)
        fetch_data_from_zero_device = "(time {:s})".format(fetch_data_from_zero_device)
        realtime2 = get_excution_time(session, fetch_data_from_zero_device)
        logging.info("The real execution time of the command is {:f}".format(realtime2))

        # --num=0 is an invalid request; the device must reject it with
        # "bad field in Write same".
        out3 = session.cmd_output("sg_write_same --in /dev/zero --num=0 --lba=128 {:s}".format(disk_name))
        logging.debug(out3)
        if re.search(r"bad field in Write same", out3) is None:
            raise exceptions.TestFail("sg_write_same command fails. output is {}".format(out3))

        if realtime2 > realtime1:
            raise exceptions.TestFail("time used is much longger")

    # Tear down any leftover VM, then resolve the backing disk image.
    thin_provisioning.destroy_vm(env)
    if params.get("disk_type") == "scsi_debug":
        disk_name = thin_provisioning.get_scsi_disk()[1]
        params["image_name_image_test"] = disk_name
    else:
        disk_name = os.path.join(data_dir.get_data_dir(), params.get("image_name_image_test"))
        disk_name = "{:s}.raw".format(disk_name)

    params["start_vm"] = "yes"
    vm_name = params["main_vm"]
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    guest_disk_name = thin_provisioning.get_scsi_disk(session)[1]
    run_sg_utils(guest_disk_name, session)

    # The guest's view of the disk and the host image must hash the same.
    guest_sha1 = session.cmd_output("sha1sum {:s}".format(guest_disk_name)).split()[0]
    host_sha1 = process.system_output("sha1sum {:s}".format(disk_name)).split()[0]
    if guest_sha1 != host_sha1:
        raise exceptions.TestFail("after sg_writesame, image hash value becomes different between guest and host ")

    session.close()
    if vm:
        vm.destroy()
[ "= 'https://randomuser.me/api/' request = requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] genero=urls['gender'] direccion=urls['location']['street'] celular=urls['cell']", "from bs4 import BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db =", "class cliente(): for i in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request =", "db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for i in range(0,51): ids.append(i)", "range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls) for urls in request.json()['results']:", "requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] genero=urls['gender'] direccion=urls['location']['street'] celular=urls['cell'] cursor.execute(\"INSERT INTO Clientes(ID_CLIENTE,NOMBRE_CLIENTE,GENERO,DIRECCION,CELULAR)VALUES(?,?,?,?,?)\",(i,nombre,genero,direccion,celular)) db.commit()", "cursor = db.cursor() class cliente(): for i in range(0,51): ids.append(i) i+=1 urls =", "in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls) for urls in", "import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor()", "sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for", "= sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for i in range(0,51): ids.append(i) i+=1", "i in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls) for urls", "cliente(): for i in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls)", "i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] 
genero=urls['gender']", "bs4 import BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db')", "ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for i in", "sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for i in range(0,51): ids.append(i) i+=1 urls", "'https://randomuser.me/api/' request = requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] genero=urls['gender'] direccion=urls['location']['street'] celular=urls['cell'] cursor.execute(\"INSERT", "request = requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] genero=urls['gender'] direccion=urls['location']['street'] celular=urls['cell'] cursor.execute(\"INSERT INTO", "Martinez/Proyecto Ordinario/clientes.py from bs4 import BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0", "<filename>Ago-Dic-2018/Orlando Martinez/Proyecto Ordinario/clientes.py from bs4 import BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[]", "urls = 'https://randomuser.me/api/' request = requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] genero=urls['gender'] direccion=urls['location']['street']", "Ordinario/clientes.py from bs4 import BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db", "requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class", "import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente():", "BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor =", "for i in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls) 
for", "ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request = requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first']", "= requests.get(urls) for urls in request.json()['results']: nombre=urls['name']['first'] genero=urls['gender'] direccion=urls['location']['street'] celular=urls['cell'] cursor.execute(\"INSERT INTO Clientes(ID_CLIENTE,NOMBRE_CLIENTE,GENERO,DIRECCION,CELULAR)VALUES(?,?,?,?,?)\",(i,nombre,genero,direccion,celular))", "import BeautifulSoup import requests import sqlite3 url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor", "db.cursor() class cliente(): for i in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/' request", "url=\"https://randomuser.me/api/\" ids=[] i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for i", "i=0 db = sqlite3.connect('Taqueria.db') cursor = db.cursor() class cliente(): for i in range(0,51):", "= db.cursor() class cliente(): for i in range(0,51): ids.append(i) i+=1 urls = 'https://randomuser.me/api/'" ]
[ "= (int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay) dayCount = round(dayC) extrahours =", "= '%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\",", "spaces startTime = start_Gen[0].split(\":\") #the time based on colon #start_Gen[1] is holding either", "= (ind+1)%7 for i in range(1,dayCount+1): newDay = days[ind] i=i+1 ind = (ind+1)", "on spaces startTime = start_Gen[0].split(\":\") #the time based on colon #start_Gen[1] is holding", "round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) -", "the input as a list start_Gen = start.split() #based on spaces startTime =", "+ (int(startTime[1]) / 60 )) #print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay extrahours", "or PM amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else:", "\"Thursday\", \"Friday\", \"Saturday\"] #index holds which day is sent in the argument ind", "time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total minutes with us", "totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal = y", "AM or PM amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\"", "day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted}", "if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal = hoursTotal//12", "hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal", "hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal", "if day: days = [\"Sunday\", \"Monday\", \"Tuesday\", 
\"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds", "if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1): return", "= \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted)", "= start.split() #based on spaces startTime = start_Gen[0].split(\":\") #the time based on colon", "(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days", "startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in minutes #print(\"Total minutes \" +str(startMinutes))", "break #print(index) #print(days[index]) newDay = \"\" if dayCount==0: newDay=day else: ind = (ind+1)%7", "7 #print(newDay) if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if", "{amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted}", "and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack = start_Gen[1] if extrahours>=12: if", "= start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount))", "start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted", "#start_Gen[1] is holding either AM or PM #print(startTime) #Holding the starting time supplied", "= hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\",", "days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem =", 
"amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted ==", "y = round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC =", "#print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal =", "\"Saturday\"] #index holds which day is sent in the argument ind = 0", "return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount}", "{newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted}", "dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24", "hourrem+startDay extrahours = round(extrahours) #print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding", "list start_Gen = start.split() #based on spaces startTime = start_Gen[0].split(\":\") #the time based", "newDay=day else: ind = (ind+1)%7 for i in range(1,dayCount+1): newDay = days[ind] i=i+1", "{newDay} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return", "as a list start_Gen = start.split() #based on spaces startTime = start_Gen[0].split(\":\") #the", "start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted =", "#dayTotal = hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",", "60 y = round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC", "starting time 
supplied duration=duration.split(\":\") #based on colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1])", "#print(dayTotal) if day: days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index", "the argument ind = 0 for i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break", "#print(\"Total minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time in", "+str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time in minutes #print(durationMinutes) totalDurationMinutes", "#print(totalDurationMinutes) #Getting total time in hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x", "minsTotalformatted = '%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day: days =", "(dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else:", "int(startTime[1]) #Start time in minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60 +", "(int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay", "/ 60 )) #print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours)", "myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay = \"\" if dayCount==0: newDay=day else: ind", "= hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted =", "duration,day=\"\"): #Splitting and storing the input as a list start_Gen = start.split() #based", "if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1): return 
(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack},", "colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in minutes #print(\"Total minutes", "(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1):", "or PM #print(startTime) #Holding the starting time supplied duration=duration.split(\":\") #based on colon #print(duration)", "= round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60)", "if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay = \"\" if dayCount==0: newDay=day else:", "PM #print(startTime) #Holding the starting time supplied duration=duration.split(\":\") #based on colon #print(duration) startMinutes", "time in hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) *", "- (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0])", "#the time based on colon #start_Gen[1] is holding either AM or PM #print(startTime)", "#print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0:", "= round(extrahours) #print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or", "return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount} days later)')", "round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting", "({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return 
(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}", "minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y = round(x)", "time in minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given", "{amPMTrack}, {newDay} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)')", "duration time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total minutes with", "enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay = \"\" if dayCount==0: newDay=day", "#Splitting and storing the input as a list start_Gen = start.split() #based on", "#print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2)", "#print(days[index]) newDay = \"\" if dayCount==0: newDay=day else: ind = (ind+1)%7 for i", "extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack = start_Gen[1]", "startMinutes + durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting total time in hours", "time based on colon #start_Gen[1] is holding either AM or PM #print(startTime) #Holding", "the starting time supplied duration=duration.split(\":\") #based on colon #print(duration) startMinutes = int(startTime[0])*60 +", "day: days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which", "#print(newDay) if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1):", "dayCount+=1 #Deciding AM or PM amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack", "\"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which day is sent in 
the", "#Given duration time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total minutes", "minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24", "#print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in minutes #print(\"Total minutes \"", "minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\",", "hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]", "day is sent in the argument ind = 0 for i,myday in enumerate(days):", "#print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal = y #print(minsTotal)", "else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted", "return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if", "{amPMTrack}, {newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return", "on colon #start_Gen[1] is holding either AM or PM #print(startTime) #Holding the starting", "dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) /", "argument ind = 0 for i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index)", "= totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal =", "int(duration[0])*60 + int(duration[1]) #Given duration time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes +", "in minutes #print(\"Total minutes \" 
+str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration", "= int(duration[0])*60 + int(duration[1]) #Given duration time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes", "hourrem = dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay) dayCount", "= dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay) dayCount =", "= y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC)", "start.split() #based on spaces startTime = start_Gen[0].split(\":\") #the time based on colon #start_Gen[1]", "ind=i break #print(index) #print(days[index]) newDay = \"\" if dayCount==0: newDay=day else: ind =", "input as a list start_Gen = start.split() #based on spaces startTime = start_Gen[0].split(\":\")", "+ int(duration[1]) #Given duration time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes", "x = round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal = y #print(minsTotal) #print(hoursTotal)", "= round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) / 60", "round(extrahours) #print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM", "range(1,dayCount+1): newDay = days[ind] i=i+1 ind = (ind+1) % 7 #print(newDay) if day:", "holding either AM or PM #print(startTime) #Holding the starting time supplied duration=duration.split(\":\") #based", "'%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\", \"Monday\",", "\"\" if dayCount==0: newDay=day else: ind = (ind+1)%7 for i in range(1,dayCount+1): newDay", ")) #print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours) if", "= hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: 
hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal", "#based on spaces startTime = start_Gen[0].split(\":\") #the time based on colon #start_Gen[1] is", "start_Gen = start.split() #based on spaces startTime = start_Gen[0].split(\":\") #the time based on", "amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12", "{amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return", "based on colon #start_Gen[1] is holding either AM or PM #print(startTime) #Holding the", "for i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay = \"\"", "#print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay)", "#print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours) if extrahours", "(ind+1) % 7 #print(newDay) if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next", "[\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which day is sent", "in hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60", "#Holding the starting time supplied duration=duration.split(\":\") #based on colon #print(duration) startMinutes = int(startTime[0])*60", "sent in the argument ind = 0 for i,myday in enumerate(days): if myday.lower()==day.lower():", "and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y =", "startDay = (int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay) dayCount = round(dayC) extrahours", "= round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours) if 
extrahours >= 12", "start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\":", "= [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which day is", "which day is sent in the argument ind = 0 for i,myday in", "with us #print(totalDurationMinutes) #Getting total time in hours and minutes hoursTotal = totalDurationMinutes//60", "% minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day: days = [\"Sunday\", \"Monday\", \"Tuesday\",", "start_Gen[0].split(\":\") #the time based on colon #start_Gen[1] is holding either AM or PM", "startTime = start_Gen[0].split(\":\") #the time based on colon #start_Gen[1] is holding either AM", "ind = 0 for i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index])", "minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting", "hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day: days", "hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2) * 60 y", "\"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if", "y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem", "= hourrem+startDay extrahours = round(extrahours) #print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1", "hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted", "\"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which day is sent in the argument", 
"hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted = '%02d' %", "(dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}", "hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted = '%02d'", "def add_time(start, duration,day=\"\"): #Splitting and storing the input as a list start_Gen =", "== 0: hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if", "on colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in minutes #print(\"Total", "#print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack", "int(startTime[0])*60 + int(startTime[1]) #Start time in minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes =", "+ durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting total time in hours and", "#Getting total time in hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x =", "= round((totalDurationMinutes/60)%1,2) * 60 y = round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal)", "a list start_Gen = start.split() #based on spaces startTime = start_Gen[0].split(\":\") #the time", "#based on colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in minutes", "totalDurationMinutes = startMinutes + durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting total time", "(next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted}", "{newDay} ({dayCount} days later)') return 
(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted}", "#print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1])", "total time in hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal) x = round((totalDurationMinutes/60)%1,2)", "* 60 y = round(x) minsTotal = y #print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days", "and storing the input as a list start_Gen = start.split() #based on spaces", "minutes with us #print(totalDurationMinutes) #Getting total time in hours and minutes hoursTotal =", "hoursTotalFormatted == 0: hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal)", "in range(1,dayCount+1): newDay = days[ind] i=i+1 ind = (ind+1) % 7 #print(newDay) if", "extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal", "0 for i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay =", "(next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}')", "minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time in minutes", "int(duration[1]) #Given duration time in minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total", "\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which day is sent in", "if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted =", "= (ind+1) % 7 #print(newDay) if day: if (dayCount==1): return 
(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}", "= \"\" if dayCount==0: newDay=day else: ind = (ind+1)%7 for i in range(1,dayCount+1):", "(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount} days later)') return", "storing the input as a list start_Gen = start.split() #based on spaces startTime", "#print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem)", "\"Friday\", \"Saturday\"] #index holds which day is sent in the argument ind =", "us #print(totalDurationMinutes) #Getting total time in hours and minutes hoursTotal = totalDurationMinutes//60 #print(hoursTotal)", "i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay = \"\" if", "in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay = \"\" if dayCount==0:", "else: ind = (ind+1)%7 for i in range(1,dayCount+1): newDay = days[ind] i=i+1 ind", "is sent in the argument ind = 0 for i,myday in enumerate(days): if", "= days[ind] i=i+1 ind = (ind+1) % 7 #print(newDay) if day: if (dayCount==1):", "if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days: \"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted", "#Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem", "if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount}", "= ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay", "in the argument ind = 0 for i,myday in enumerate(days): if 
myday.lower()==day.lower(): ind=i", "amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\" #print(\"Days:", "#print(startTime) #Holding the starting time supplied duration=duration.split(\":\") #based on colon #print(duration) startMinutes =", "= 0 for i,myday in enumerate(days): if myday.lower()==day.lower(): ind=i break #print(index) #print(days[index]) newDay", "i in range(1,dayCount+1): newDay = days[ind] i=i+1 ind = (ind+1) % 7 #print(newDay)", ">= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack = start_Gen[1] if", "#Total minutes with us #print(totalDurationMinutes) #Getting total time in hours and minutes hoursTotal", "AM or PM #print(startTime) #Holding the starting time supplied duration=duration.split(\":\") #based on colon", "60 )) #print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours)", "extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\":", "ind = (ind+1) % 7 #print(newDay) if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack},", "colon #start_Gen[1] is holding either AM or PM #print(startTime) #Holding the starting time", "\"+str(dayCount)) hoursTotalFormatted = hoursTotal hoursTotalFormatted = hoursTotal%12 #print(hoursTotalFormatted) if hoursTotalFormatted == 0: hoursTotalFormatted=12", "#Start time in minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1])", "round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours) if extrahours >= 12 and", "extrahours = round(extrahours) #print(extrahours) if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM", "#index holds which day is sent in the argument ind = 0 for", "holds which day is sent in the argument ind = 0 for i,myday", "= 
start_Gen[0].split(\":\") #the time based on colon #start_Gen[1] is holding either AM or", "return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if", "dayCount = round(dayC) extrahours = hourrem+startDay extrahours = round(extrahours) #print(extrahours) if extrahours >=", "days = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] #index holds which day", "(startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0]) +", "#print(minsTotal) #print(hoursTotal) #print(minsTotal) #Counting days dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem =", "(ind+1)%7 for i in range(1,dayCount+1): newDay = days[ind] i=i+1 ind = (ind+1) %", "round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) / 60 ))", "12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack = start_Gen[1] if extrahours>=12:", "if dayCount==0: newDay=day else: ind = (ind+1)%7 for i in range(1,dayCount+1): newDay =", "= int(startTime[0])*60 + int(startTime[1]) #Start time in minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes", "(int(startTime[1]) / 60 )) #print(startDay) dayCount = round(dayC) extrahours = hourrem+startDay extrahours =", "either AM or PM #print(startTime) #Holding the starting time supplied duration=duration.split(\":\") #based on", "if extrahours >= 12 and start_Gen[1]==\"PM\": dayCount+=1 #Deciding AM or PM amPMTrack =", "minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time", "newDay = \"\" if dayCount==0: newDay=day else: ind = (ind+1)%7 for i in", "i=i+1 ind = (ind+1) % 7 #print(newDay) if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted}", "time 
supplied duration=duration.split(\":\") #based on colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start", "if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}')", "#print(index) #print(days[index]) newDay = \"\" if dayCount==0: newDay=day else: ind = (ind+1)%7 for", "#Deciding AM or PM amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack =", "((totalDurationMinutes/60) - (startMinutes/60))/24 #print(dayC) dayrem = round((dayC)%1,2) #print(dayrem) hourrem = dayrem*24 startDay =", "newDay = days[ind] i=i+1 ind = (ind+1) % 7 #print(newDay) if day: if", "else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}", "dayrem*24 startDay = (int(startTime[0]) + (int(startTime[1]) / 60 )) #print(startDay) dayCount = round(dayC)", "dayCount==0: newDay=day else: ind = (ind+1)%7 for i in range(1,dayCount+1): newDay = days[ind]", "is holding either AM or PM #print(startTime) #Holding the starting time supplied duration=duration.split(\":\")", "0: hoursTotalFormatted=12 minsTotalformatted = '%02d' % minsTotal #dayTotal = hoursTotal//12 #print(dayTotal) if day:", "(f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1):", "day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack},", "ind = (ind+1)%7 for i in range(1,dayCount+1): newDay = days[ind] i=i+1 ind =", "add_time(start, duration,day=\"\"): #Splitting and storing the input as a list start_Gen = start.split()", "supplied 
duration=duration.split(\":\") #based on colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time", "durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting total time in hours and minutes", "PM amPMTrack = start_Gen[1] if extrahours>=12: if start_Gen[1]==\"PM\": amPMTrack = \"AM\" else: amPMTrack=\"PM\"", "days later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next", "+ int(startTime[1]) #Start time in minutes #print(\"Total minutes \" +str(startMinutes)) durationMinutes = int(duration[0])*60", "days[ind] i=i+1 ind = (ind+1) % 7 #print(newDay) if day: if (dayCount==1): return", "durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time in minutes #print(durationMinutes) totalDurationMinutes =", "duration=duration.split(\":\") #based on colon #print(duration) startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in", "% 7 #print(newDay) if day: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)')", "in minutes #print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total minutes with us #print(totalDurationMinutes)", "#print(durationMinutes) totalDurationMinutes = startMinutes + durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting total", "later)') return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}') else: if (dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)')", "for i in range(1,dayCount+1): newDay = days[ind] i=i+1 ind = (ind+1) % 7", "\" +str(startMinutes)) durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time in minutes #print(durationMinutes)", "= startMinutes + durationMinutes #Total minutes with us #print(totalDurationMinutes) #Getting total time in", 
"(dayCount==1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)') if (dayCount>1): return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount} days" ]
[]
[ "}, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\":", "format name, as used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The", "with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri =", "with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\":", "[ { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, {", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50,", "\"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True, }, \"color\":", "\"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) } }, }, \"scale_range\":", "= [ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5,", "Apache-2.0 import datetime import time from unittest.mock import MagicMock import numpy as np", "null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0,", "}, \"description\": \"Real, real ugly\", }, \"impossible\": { \"bits\": 0, \"values\": { '0':", "import app with app.test_client() as client: yield client @pytest.fixture def minimal_dc(): dc =", "\"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ { \"title\": \"Water\", \"abstract\": \"\",", "{ \"or\": { \"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\",", "\"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] } },", 
"data[\"red\"]) return [ { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\":", "\"All splodgy looking\" }, \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1':", "\"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), },", "\"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\":", "\"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\":", "}, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\":", "\"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, },", "{ \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\",", "{ \"water\": [ { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\",", "\"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\":", "of datacube-ows, part of the Open Data Cube project. 
# See https://opendatacube.org for", "f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with", "Flag rules can contain an \"and\" - they match if all of the", "\"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] } }, {", "coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), })", "{\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\":", "\"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null},", "-999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock() lmo.loc =", "\"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All", "\"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01,", "}) return output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords,", "\"Terrain\", \"abstract\": \"\", # Flag rules can contain an \"or\" - they match", "{ \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, {", "\"value\": 0.3, \"color\": \"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\":", "False, '1': \"Woah!\" }, \"description\": \"Won't happen. Can't happen. Might happen.\", }, }", "}, { # Mask out pixels with low_solar_angle, high_slope # or cloud shadow.", "\"mime\": \"image/geotiff\", # The file extension to add to the filename. 
\"extension\": \"tif\",", "\"high_slope\": { \"description\": \"High slope\", \"bits\": 4, \"values\": { '0': False, '1': True", "\"values\": { '0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy looking\" }, \"splodgy\":", "they match if either of the conditions hold. \"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\":", "0, \"description\": \"No data\", \"values\": { '0': False, '1': True }, }, \"noncontiguous\":", "https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5 while", "{ \"components\": { \"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\":", "the filename. \"extension\": \"tif\", # Whether or not the file format supports multiple", "nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value", "\"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\": True", "flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with app.test_client() as client: yield client", "dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\": { \"bits\": 0,", "= requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate()", "from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session import", "lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb,", "# # Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import", "False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": 
\"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\":", "or cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False,", "@pytest.fixture def s3_base(): # writable local S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import", "in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the", "False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False,", "} xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5,", "\"styles\": [ { \"name\": \"band1\", \"title\": \"Single Band Test Style\", \"abstract\": \"\", \"components\":", "happen. Might happen.\", }, } }) }) return output @pytest.fixture def dummy_raw_ls_data(): output", "(0, 255) } }, }, \"scale_range\": (50, 3000), }, { \"index_function\": { \"function\":", "Calculate NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] +", "\"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ] }, { \"index_function\": { \"function\":", "\"SaddleBrown\", }, ] } }, { \"value_map\": { \"water\": [ { # Make", "}, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\":", "[0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\":", "See https://opendatacube.org for more information. # # Copyright (c) 2017-2021 OWS Contributors #", "}, { \"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\":", "@pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\":", "file extension to add to the filename. 
\"extension\": \"tif\", # Whether or not", "datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return", "used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of", "} }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\":", "}, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", },", "\"color\": \"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\":", "{ \"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False,", "\"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), }, { \"components\":", "\"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\":", "} @pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\":", "@scalable def scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"] - data[\"red\"])", "return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def", "\"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\",", "200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 200, 200, 700], dim_coords), \"green\":", "\"bits\": 6, \"values\": { '0': False, '1': True }, }, \"water_observed\": { \"description\":", "s3_config_nested_3): 
config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\":", "} }) }) return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords)", "1024] } ] } } @pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1, 1),", "\"description\": \"Classified as water by the decision tree\", \"bits\": 7, \"values\": { '0':", "[ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\":", "\"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", },", "\"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], },", "time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client", "s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"},", "as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\":", "{\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\":", "= xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\":", "# Whether or not the file format supports multiple time slices. 
\"multi-time\": False", "\"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) }", "@pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords =", "return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [ { \"components\": {", "yield s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open:", "\"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\":", "}, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3, \"values\": { '0': False, '1':", "\"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with", "{ \"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.1, \"color\": \"#000030\",", "to add to the filename. 
\"extension\": \"tif\", # Whether or not the file", "\"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] } }, { \"value_map\":", "\"flags\": { \"and\": { \"high_slope\": True, \"cloud\": True } }, \"color\": \"#f2dcb4\", },", "\"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\",", "\"#FF9090\", }, ] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\":", "\"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow and High Slope\", \"abstract\":", "\"alpha\": 0.0, }, { \"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\":", "{ \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", }, { \"title\":", "= \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\":", "}, }, \"low_solar_angle\": { \"description\": \"Low solar incidence angle\", \"bits\": 2, \"values\": {", "[] } @pytest.fixture def wcs_global_cfg(): return { \"formats\": { # Key is the", "}, } }) }) return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords),", "1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "}, }, } }) }) return output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({", "- they match if all of the conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True,", "-3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000, 600, 200, 1000], dim_coords),", "@pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723,", "\"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"swir1\":", "missing or saturated\", \"bits\": 1, \"values\": { '0': False, '1': True }, },", "data\", \"values\": { '0': False, '1': True }, }, \"noncontiguous\": { \"description\": \"At", "] @pytest.fixture def configs_for_wofs(): return [ { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\",", "coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101,", "\"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\",", "\"invert\": True, } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\":", "\"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False,", "return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\":", "'0': False, '1': True }, \"description\": \"Real, real ugly\", }, \"impossible\": { \"bits\":", "\"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\":", "\"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy looking\" }, \"ugly\":", "output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7,", "(0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {", 
"}, { \"components\": { \"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, },", "{ # WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\",", "{ \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\",", "\"\", \"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\",", "\"water_observed\": True, }, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\":", "True }, }, \"low_solar_angle\": { \"description\": \"Low solar incidence angle\", \"bits\": 2, \"values\":", "0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011,", "\"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ] } }, { \"name\":", "-1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords", "False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, }", "\\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}'", "}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900), }, { \"components\": { \"red\":", "(1600, 3200), }, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900), }, { \"components\":", "0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now() ])", "\"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(),", "\"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 
1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\":", "The MIME type of the image, as used in the Http Response. \"mime\":", "dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output", "elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres' in s:", "{ \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None,", "\"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the image, as used in the Http", "s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\":", "\"bits\": 1, \"values\": { '0': False, '1': True }, }, \"low_solar_angle\": { \"description\":", "{ \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\":", "}) }) return output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\",", "Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud\": True } },", "\"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\":", "def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords,", "}, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\":", "\"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture def null_mask(): return", "\"formats\": { # Key is the format name, as used in DescribeCoverage XML", "2], \"preserve_user_date_order\": True, \"aggregator_function\": { 
\"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0],", "dummy_da) def get_boto3_client(): from botocore.session import Session session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI)", "\"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), })", "\"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": { # Web Mercator", "\"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return", "= \"EPSG:4326\" else: pass if 'nonativeres' in s: pass elif 'nativeres' in s:", "\"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value =", "[ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0, \"color\":", "Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\": True, \"cloud\": True }", "band is missing or saturated\", \"bits\": 1, \"values\": { '0': False, '1': True", "True}, \"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True,", "\"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\":", "\"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, }", "\"description\": \"All splodgy looking\" }, \"flavour\": { \"bits\": 3, \"values\": { '0': \"Bland\",", "join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0 to 1.0) unscaled", "\"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0},", "\"parent\"} parent.attribution.title = \"Parent 
Attribution\" return parent @pytest.fixture def minimal_layer_cfg(): return { \"title\":", "} @pytest.fixture def minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\",", "# Cloudy Slopes rule needs to come before the Cloud # and High", "False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\":", "{\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": {", "{\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False},", "\"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True },", "'0': False, '1': \"Woah!\" }, \"description\": \"Won't happen. Can't happen. Might happen.\", },", "<gh_stars>0 # This file is part of datacube-ows, part of the Open Data", "\"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\": True } }, \"color\":", "\"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.1, \"color\": \"#000030\", \"alpha\":", "], \"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\"", "\"Splodgy\", }, \"description\": \"All splodgy looking\" }, \"ugly\": { \"bits\": 1, \"values\": {", "}) return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def", "Session session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local", "\"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", },", "\"green\": {}, \"blue\": {}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "\"subtest\": 
{\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' )", "\"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False,", "\"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0]", "\"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False,", "global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None", "True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\", # Flag rules can", "}, \"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True},", "and High Slopes rules. { \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\":", "{\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\":", "{ \"formats\": { # Key is the format name, as used in DescribeCoverage", "\"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open:", "\"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy looking\" }, \"ugly\": { \"bits\": 1,", "\"lat\": { \"min\": -0.1, \"max\": 0.1, }, \"lon\": { \"min\": -0.1, \"max\": 0.1,", "\"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\": { \"bits\":", "0.001, \"longitude\": 0.001, } else: pass return mprod dc.index.products.get_by_name = product_by_name return dc", "def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with 
s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture", "\"title\": \"Single Band Test Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\":", "\"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch):", "}, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ]", "[200, 500, 0, 200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400,", "\"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\", }, { \"title\":", "\"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\":", "\"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, },", "\"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as", "(-1.0 to 1.0) unscaled = (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) #", "\"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as", "}, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\",", "\"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, },", "{ \"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\",", "0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\":", "\"vertical_coord\": \"latitude\", 
\"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\",", "1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900), },", "True, \"cloud\": True } }, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\":", "\"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] } }, { \"value_map\": {", "\"values\": { '0': False, '1': True }, }, \"noncontiguous\": { \"description\": \"At least", "}, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\":", "None, }, } global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def", "= ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999, \"band3\":", "[2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0,", "def mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return { \"lat\":", "\"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { # Web Mercator \"geographic\": False,", "times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\":", "data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale to [-1.0 - 1.0] to [0", "@pytest.fixture def configs_for_wofs(): return [ { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\":", "{ \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\":", "[100, 500, 0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600,", "600, 900, 200, 400, 100], dim_coords), 
\"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100,", "{ \"bits\": 3, \"values\": { '0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy", "\"BurlyWood\", }, # Only matches non-cloudy high-slopes. { \"title\": \"High Slopes\", \"abstract\": \"\",", "is missing or saturated\", \"bits\": 1, \"values\": { '0': False, '1': True },", "1.0] to [0 - 255] scaled = ((unscaled + 1.0) * 255 /", "as used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type", "3000), }, { \"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700), },", "}, ] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\": {", "@pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\":", "} }) }) return output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546,", "\"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True,", "\"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800), }, { \"components\":", "dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data(): output", "\"band4\": \"nan\", } lmo = MagicMock() lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\": nb,", "def minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [", "}, {\"value\": 1.0, \"color\": \"#FF9090\", }, ] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001, } else: pass return", "dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 
200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100,", "attrs = { \"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\": \"No data\", \"values\":", "{ \"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\", },", "2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value =", "\"bits\": 3, \"values\": { '0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy looking\"", "{ \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": {", "timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\")", "\"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3, \"color\": \"#703070\", }, { \"value\":", "\"Won't happen. Can't happen. Might happen.\", }, } }) }) return output def", "{\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0,", "return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local S3 system # adapted", "(50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\":", "\"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock() lmo.loc", "}, \"impossible\": { \"bits\": 0, \"values\": { '0': False, '1': \"Woah!\" }, \"description\":", "} ] } } @pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010,", "}, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0,", "\"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\", },", "\"#000000\", \"alpha\": 0.0, }, { \"value\": 
0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, {", "}, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\",", "dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"ir\":", "[0 - 255] scaled = ((unscaled + 1.0) * 255 / 2).clip(0, 255)", "@pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\",", "\"values\": { '0': False, '1': True }, }, \"low_solar_angle\": { \"description\": \"Low solar", "least one EO band is missing or saturated\", \"bits\": 1, \"values\": { '0':", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\":", "[0.0, 100.0], \"pq_masks\": [ { # Mask out nodata pixels. \"band\": \"water\", \"enum\":", "out pixels with low_solar_angle, high_slope # or cloud shadow. \"band\": \"water\", \"flags\": {", "}, \"layers\": [] } @pytest.fixture def wcs_global_cfg(): return { \"formats\": { # Key", "monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with app.test_client() as client: yield client @pytest.fixture", "}, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", }, {", "= { \"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\": \"No data\", \"values\": {", "\"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ {", "= { \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock()", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\":", "\"value_map\": { \"water\": [ { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\":", "{ \"latitude\": 0.001, \"longitude\": 0.001, } else: pass return mprod 
dc.index.products.get_by_name = product_by_name", "= \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\":", "\"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = {", "-2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords =", "@pytest.fixture def wcs_global_cfg(): return { \"formats\": { # Key is the format name,", "{ \"and\": { \"high_slope\": True, \"cloud\": True } }, \"color\": \"#f2dcb4\", }, {", "\"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600,", "\"green\": dim1_da(\"green\", [100, 500, 0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500,", "or saturated\", \"bits\": 1, \"values\": { '0': False, '1': True }, }, \"low_solar_angle\":", "\"Joyous\", }, \"description\": \"All splodgy looking\" }, \"flavour\": { \"bits\": 3, \"values\": {", "Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\":", "\"Low solar incidence angle\", \"bits\": 2, \"values\": { '0': False, '1': True },", "{ \"water\": [ # Cloudy Slopes rule needs to come before the Cloud", "times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\":", "\"alias_of\": None, }, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False,", "\"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, } }, { # Mask", "dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\",", "\"nan\", } lmo = MagicMock() lmo.loc = { 
\"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\":", "in s: return None mprod = MagicMock() flag_def = { \"moo\": {\"bits\": 0},", "'0': False, '1': True }, }, \"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\":", "\"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", },", "'lookupfail' in s: return None mprod = MagicMock() flag_def = { \"moo\": {\"bits\":", "] } } @pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The", "\"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0]", "{\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\": {", "client: yield client @pytest.fixture def minimal_dc(): dc = MagicMock() nb = MagicMock() nb.index", "(50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\":", "\"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ # Cloudy Slopes rule needs", "True }, }, }, \"layers\": [] } @pytest.fixture def wcs_global_cfg(): return { \"formats\":", "\"water_observed\": { \"description\": \"Classified as water by the decision tree\", \"bits\": 7, \"values\":", "\"and\": { \"high_slope\": True, \"cloud\": True } }, \"color\": \"#f2dcb4\", }, { \"title\":", "2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\":", "\"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\",", "Shadow or Low Sun Angle\", 
\"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\": True,", "AND no water observed \"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\": False, },", "\"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\":", "{\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow and High", "def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = {", "@pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs =", "\"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ] } },", "}, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01,", "\"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": {\"blue\": 1.0},", "100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, } ] }, { \"components\":", "\"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def", "happen.\", }, } }) }) return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] *", "\"ugly\": { \"bits\": 1, \"values\": { '0': False, '1': True }, \"description\": \"Real,", "\"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": {", "scalable @scalable def scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"] -", "# Mask out pixels with cloud AND no water observed \"band\": \"water\", \"flags\":", "\"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\",", "from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0)", "1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 
4}, \"dang\": {\"bits\": 5},", "# WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, }, }, \"layers\": [] } @pytest.fixture", "used in the Http Response. \"mime\": \"image/geotiff\", # The file extension to add", "\"color\": \"Brown\", }, ] } }, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\",", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\",", "xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209,", "\"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3,", "\\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple):", "# This file is part of datacube-ows, part of the Open Data Cube", "\"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with app.test_client()", "}, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\":", "\"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\": {", "{ # Key is the format name, as used in DescribeCoverage XML \"GeoTIFF\":", "return [ { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\":", "dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords,", "\"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\"", "Mercator \"geographic\": False, 
\"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\":", "\"or\": { \"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\", },", "\"alias_of\": None, }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\",", "xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0,", "global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract = \"Global", "\"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output = xr.Dataset({", "\"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16),", "\"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ # Cloudy Slopes rule needs to", "{\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2,", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\": -1.0,", "200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400, 300, 200], dim_coords), \"blue\":", "{\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000), },", "proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client()", "1.0, }, { \"value\": 0.3, \"color\": \"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\",", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, },", "8000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\":", 
"else: pass return mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg", "dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": {", "= MagicMock() flag_def = { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\":", "Attribution\" return parent @pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The", "dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0,", "[ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { # Web Mercator \"geographic\":", "shadow\", \"bits\": 3, \"values\": { '0': False, '1': True }, }, \"high_slope\": {", "\"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\": True } }, \"color\": \"#335277\", },", "5 while timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except:", "1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\":", "22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400,", "\"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only matches non-cloudy high-slopes. { \"title\": \"High", "file is part of datacube-ows, part of the Open Data Cube project. 
#", "\"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, },", "dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0,", "\"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return parent @pytest.fixture", "{ \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, } }, \"native_format\": \"GeoTIFF\",", "\"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0,", "0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\": {", "pixels. \"band\": \"water\", \"enum\": 1, \"invert\": True, }, { # Mask out pixels", "None mprod = MagicMock() flag_def = { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1},", "\"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs():", "True, }, ] } ] @pytest.fixture def multi_date_cfg(): return { \"index_function\": { \"function\":", "-1.0, -2.0, -3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000, 600, 200,", "dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords,", "subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5 while timeout >", "[0, 1024] } ] } } @pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The", "of the Open Data Cube project. # See https://opendatacube.org for more information. #", "# Mask out nodata pixels. 
\"band\": \"water\", \"enum\": 1, \"invert\": True, }, {", "MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\"", "True, }, { # Mask out pixels with low_solar_angle, high_slope # or cloud", "config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture", "}, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, } },", "\"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres' in", "}, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3, \"values\": { '0': False,", "{ \"band\": \"water\", \"enum\": 1, } ] }, { \"components\": { \"red\": {\"bs\":", "\"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\",", "{ '0': False, '1': True }, }, \"water_observed\": { \"description\": \"Classified as water", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, },", "{ \"water_observed\": True, \"cloud\": True } }, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded", "xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\": {", "\"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0,", "Slopes rules. 
{ \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\":", "True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] },", "[0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, } ] }, {", "{\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\",", "\"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": {", "def minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\":", "with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\":", "\"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\":", "1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800), }, { \"components\": { \"red\":", "1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\":", "\"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app", "information. 
# # Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime", "# adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout", "-0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\",", "\"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\",", "coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data,", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0,", "ugly\", }, \"impossible\": { \"bits\": 0, \"values\": { '0': False, '1': \"Woah!\" },", "True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\":", "Might happen.\", }, } }) }) return output @pytest.fixture def dummy_raw_ls_data(): output =", "}, \"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0}, },", "True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\":", "\"Cloudy Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud\": True }", "} ] } } @pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\":", "def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with app.test_client() as client: yield", "100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\":", "\"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\": { \"function\":", "\"Parent Attribution\" return parent 
@pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\":", "nb, } dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail' in s: return None", "s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\", "{\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\":", "dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output @pytest.fixture", "}, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\": {\"bs\":", "\"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\", \"title\": \"Single Band Test Style\", \"abstract\":", "time slices. \"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\",", "}, {\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\": [2, 2],", "}, { \"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.1, \"color\":", "\"blue\": {}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0},", "= subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5 while timeout > 0: try:", "S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with", "\"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index =", "\"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"],", "\"red\": 
dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\",", "\"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": {", "\"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\",", "\"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def", "{\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True},", "\"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\",", "\"water_observed\": False, }, \"invert\": True, }, ] } ] @pytest.fixture def multi_date_cfg(): return", "\"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\":", "[0.0, 100.0], }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\":", "= None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\",", "[-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100,", "s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 =", "{ \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10,", "] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, 
\"blue\": {\"npv\":", "}, ] } }, { \"value_map\": { \"water\": [ { # Make noncontiguous", "Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import time from", "22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\":", "} ] @pytest.fixture def multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\":", "in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres' in s: pass elif", "700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\",", "\"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, } }, { #", "Http Response. \"mime\": \"image/geotiff\", # The file extension to add to the filename.", "flag_def = { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\":", "\"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\", #", "'1': True }, }, \"noncontiguous\": { \"description\": \"At least one EO band is", "] @pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\": {\"bs\": 1.0}, \"green\":", "{ \"cloud\": True, \"water_observed\": False, }, \"invert\": True, }, ] } ] @pytest.fixture", "\"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\":", "in s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\":", "3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\":", "\"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\",", 
"{\"blue\": 1.0}, }, \"scale_range\": (10, 800), }, { \"components\": { \"red\": {\"red\": 1.0},", "{ \"value\": 0.3, \"color\": \"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\", }, {", "1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), }, { \"components\": { \"red\":", "] } ] @pytest.fixture def multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001, } else: pass return mprod dc.index.products.get_by_name", "0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout -= 0.1", "as client: yield client @pytest.fixture def minimal_dc(): dc = MagicMock() nb = MagicMock()", "] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700,", "s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as", "dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051,", "0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\": { '0':", "\"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), }, { \"components\":", "True }, \"description\": \"Real, real ugly\", }, \"impossible\": { \"bits\": 0, \"values\": {", "\"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\":", "\"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { # Web", "2, \"values\": { '0': False, '1': True }, }, \"terrain_shadow\": { \"description\": \"Terrain", 
"\"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0),", "= xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\":", "\"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ],", "}, \"water_observed\": { \"description\": \"Classified as water by the decision tree\", \"bits\": 7,", "app.test_client() as client: yield client @pytest.fixture def minimal_dc(): dc = MagicMock() nb =", "\"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\":", "\"published_CRSs\": { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\",", "}, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\",", "as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\":", "}, }, \"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\", \"abstract\":", "@pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0 to 1.0) unscaled =", "{\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] } }, { \"value_map\": { \"water\": [", "}, \"invert\": True, }, ] } ] @pytest.fixture def multi_date_cfg(): return { \"index_function\":", "None global_cfg.contact_position = None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\",", "{ \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\":", "{ \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", 
\"vertical_coord_first\":", "True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output = xr.Dataset({ \"ir\":", "{ \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] }", "\"longitude\": 0.001, } else: pass return mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture", "{}, \"blue\": {}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\":", "tree\", \"bits\": 7, \"values\": { '0': False, '1': True }, }, } })", "\"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture", "- 255] scaled = ((unscaled + 1.0) * 255 / 2).clip(0, 255) return", "= \"Parent Attribution\" return parent @pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The Title\",", "\"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, },", "\"scale_range\": [0.0, 100.0], \"pq_masks\": [ { # Mask out nodata pixels. 
\"band\": \"water\",", "0.001, } else: pass return mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def", "def minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\":", "\"At least one EO band is missing or saturated\", \"bits\": 1, \"values\": {", "} } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0},", "global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": {", "\"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\":", "}, \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\":", "if 'nonativecrs' in s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif", "\"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) }", "\"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1,", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\": (50, 3000),", "} global_cfg.published_CRSs = { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\",", "rules. 
{ \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}},", "0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0,", "{ \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": { # Web", "config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\":", "0.1, }, } } @pytest.fixture def minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test", "}, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\":", "\"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\", }, { \"title\": \"Water\",", "\"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\",", "botocore.session import Session session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): #", "minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True,", "1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ]", "{\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3):", "\"Observations\", \"value_map\": { \"water\": [ { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True},", "} }, \"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True},", "\"red\": dim1_da(\"red\", [200, 500, 0, 200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500,", "the conditions hold. 
\"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, {", "/ 2).clip(0, 255) return scaled from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): #", "\"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\":", "1.0), \"scale_to\": (0, 255) } }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000),", "\"\", # Flag rules can contain an \"or\" - they match if either", "\"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri", "[-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22, 44, 5),", "dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): #", "happen. 
Might happen.\", }, } }) }) return output def dim1_null_mask(coords): return dim1_da(\"mask\",", "200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400, 300, 200], dim_coords),", "Sun Angle\", \"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True },", "= [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords)", "(200, 1900), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\":", "= xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\":", "} global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def minimal_parent(): parent", "\"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\":", "\"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\": True, },", "\"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", },", ") @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open:", "\"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ { \"title\": \"Water\",", "def minimal_parent(): parent = MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"}", "return [ { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\":", "= {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return parent @pytest.fixture def minimal_layer_cfg(): return", "\"styling\": { \"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\", \"title\": \"Single Band Test", "with low_solar_angle, high_slope 
# or cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False,", "Key is the format name, as used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\":", "2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy looking\" },", "\"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\":", "def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\",", "\"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, } }, \"native_format\":", "as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\":", "def s3_base(): # writable local S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\": (50,", "{ \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, },", "\"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\",", "\"title\": \"Terrain Shadow or Low Sun Angle\", \"abstract\": \"\", \"flags\": { \"or\": {", "lmo def product_by_name(s): if 'lookupfail' in s: return None mprod = MagicMock() flag_def", "{ \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\":", "{ \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\",", "'nonativeres' in s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001,", "\"application/x-netcdf\", \"extension\": \"nc\", 
\"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data():", "{ \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000,", "scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": {\"blue\":", "1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ] } } @pytest.fixture", "}, }, \"water_observed\": { \"description\": \"Classified as water by the decision tree\", \"bits\":", "-= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\",", "looking\" }, \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", },", "0.0, }, { \"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3,", "{ \"and\": { \"water_observed\": True, \"cloud_shadow\": True } }, \"color\": \"#335277\", }, {", "{ \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\":", "Might happen.\", }, } }) }) return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True]", "the format name, as used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", #", "1.0}, }, \"scale_range\": (1000, 3000), }, { \"components\": { \"red\": { \"swir1\": 1.0,", "2).clip(0, 255) return scaled from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): # Calculate", "True}}, \"color\": \"BurlyWood\", }, # Only matches non-cloudy high-slopes. 
{ \"title\": \"High Slopes\",", "3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\":", "mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords", "nb, \"bar\": nb, } dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail' in s:", "1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open:", "WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None,", "\"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities", "1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\": (50, 3000), }, { \"components\": {", "\"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords),", "}, \"scale_range\": (10, 800), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\":", "\"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3,", "False, } } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\":", "\"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy looking\" }, \"splodgy\": { \"bits\": 2,", "1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\":", "\"scale_range\": (10, 800), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0},", "False, '1': True }, }, } }) }) return output @pytest.fixture def dummy_raw_fc_data():", "\"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, 
\"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\",", "\"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def } }", "dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\":", "\"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\":", "\"value_map\": { \"water\": [ { # Make noncontiguous data transparent \"title\": \"\", \"abstract\":", "\"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\", # Flag", "\"#303030\", }, {\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\":", "= \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple):", "\"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0},", "type of the image, as used in the Http Response. 
\"mime\": \"image/geotiff\", #", "\"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) }", "5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords,", "= { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\",", "1, \"values\": { '0': False, '1': True }, }, \"low_solar_angle\": { \"description\": \"Low", "{\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return parent @pytest.fixture def minimal_layer_cfg(): return {", "nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value = lmo def product_by_name(s):", "(data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale to [-1.0 - 1.0]", "{ \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50,", "(50, 3000), }, ] @pytest.fixture def configs_for_wofs(): return [ { \"name\": \"observations\", \"title\":", "{ \"bits\": 0, \"values\": { '0': False, '1': \"Woah!\" }, \"description\": \"Won't happen.", "\"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture", "\"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\":", "\"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\", "\"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, \"invert\": True, }", "] @pytest.fixture def multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\",", "\"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, }, \"scale_range\":", "{ 
\"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy", "dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate", "\"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": {", "\"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture def", "\"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc", "\"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", },", "\"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50,", "\\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple,", "1.0), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), }, { \"index_function\":", "Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\": True } },", "\"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}'", "Make noncontiguous data transparent \"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\":", "{\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True},", "\"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000),", "multiple time slices. 
\"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\":", "{\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value", "\"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\":", "matches non-cloudy high-slopes. { \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\":", "# See https://opendatacube.org for more information. # # Copyright (c) 2017-2021 OWS Contributors", "dim1_da, dummy_da) def get_boto3_client(): from botocore.session import Session session = Session() return session.create_client(\"s3\",", "xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords),", "[datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return { \"lat\": { \"min\": -0.1, \"max\":", "\"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) } }, },", "\"No data\", \"values\": { '0': False, '1': True }, }, \"noncontiguous\": { \"description\":", "def wcs_global_cfg(): return { \"formats\": { # Key is the format name, as", "\"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ] } xyt_coords = [", "}, }, }, \"layers\": [] } @pytest.fixture def wcs_global_cfg(): return { \"formats\": {", "= \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]')", "s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\":", "\"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\":", "dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, 
dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords,", "{\"red\": 1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\": (50, 3000), }, { \"components\":", "f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\")", "\"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, {", "\"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\": -1.0, \"color\":", "True, }, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False},", "}, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\",", "True, \"cloud\": True } }, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy Water\", \"abstract\":", "1), datetime.datetime(2010, 1, 2)] return { \"lat\": { \"min\": -0.1, \"max\": 0.1, },", "\"water\", \"enum\": 1, } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\":", "{ \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, } }, { # Mask out", "output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords", "\"scale_to\": (0, 255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\",", "\"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\":", "\"cloud\": True, \"water_observed\": False, }, \"invert\": True, }, ] } ] @pytest.fixture def", "\"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\":", "\"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { # Mask out", 
"{ \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\":", "\"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0],", "}) return output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def", "0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\": { '0': \"Melancholic\",", "global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def minimal_parent(): parent =", "part of datacube-ows, part of the Open Data Cube project. # See https://opendatacube.org", "coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords),", "no water observed \"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\": False, }, \"invert\":", "}, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "Test Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\":", "(10, 800), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\":", "\"range\": [0.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\":", "\"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5),", "{ \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0,", "\"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\")", "{ '0': False, '1': True }, }, \"cloud\": { \"description\": \"Cloudy\", 
\"bits\": 6,", "\"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\":", "\"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", },", "data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [ { \"components\": { \"red\": {\"red\": 1.0},", "\"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3, \"values\": { '0': False, '1': True", "dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture def null_mask():", "\"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [", "* 255 / 2).clip(0, 255) return scaled from datacube_ows.styles.api import scalable @scalable def", "\"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture", "\"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri =", "1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { # Mask out nodata pixels.", "tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session import Session", "dim1_da(\"green\", [100, 500, 0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000,", "= Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local S3 system", "False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, } } ] },", "\"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50,", "1.0} }, \"scale_range\": [0.0, 100.0], 
\"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, \"invert\":", "= MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent", "nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo", "\"value\": 1.0, \"color\": \"#90FF90\", } ] }, { \"components\": { \"red\": {\"red\": 1.0},", "\"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3, \"color\": \"#703070\", },", "{ \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ] }", "Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", },", "data[\"red\"]) # Scale to [-1.0 - 1.0] to [0 - 255] scaled =", "None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\":", "\"bits\": 0, \"values\": { '0': False, '1': \"Woah!\" }, \"description\": \"Won't happen. 
Can't", "{ '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy looking\" }, \"ugly\": {", "\"bits\": 3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All splodgy looking\"", "if 'lookupfail' in s: return None mprod = MagicMock() flag_def = { \"moo\":", "True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow and High Slope\", \"abstract\": \"\",", "True}, \"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True},", "[0.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"},", "\"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri =", "return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask():", "1, } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0},", "\"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": { # Web Mercator \"geographic\": False,", "\"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\":", "\"Cloudy\", \"bits\": 6, \"values\": { '0': False, '1': True }, }, \"water_observed\": {", "@pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open:", "800), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\":", "\"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\",", "\"bits\": 2, \"values\": { '0': False, '1': True }, }, \"terrain_shadow\": { 
\"description\":", "def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3", "\"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\",", "\"color\": \"#96966e\", }, ] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [ {", "elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001, } else:", "pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\",", "s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\":", "system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT])", "{\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with", "\"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the image, as used", "{ \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, {", "\"image/geotiff\", # The file extension to add to the filename. 
\"extension\": \"tif\", #", "\"bits\": 1, \"values\": { '0': False, '1': True }, \"description\": \"Real, real ugly\",", "'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", }", "r.ok: break except: pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def", "\"bits\": 5, \"values\": { '0': False, '1': True }, }, \"cloud\": { \"description\":", "product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title", "{ # Mask out pixels with low_solar_angle, high_slope # or cloud shadow. \"band\":", "1, \"invert\": True, } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\":", "(1000, 8000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\":", "hold. \"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\",", "Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities =", "{\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\":", "{\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, {", "def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True]", "{\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), },", "- data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [ { \"components\": { \"red\": {\"red\":", "(50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {},", "dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": {", "Title\", \"abstract\": \"The Abstract\", \"name\": 
\"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": {", "False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, } } ] }, { \"components\":", "{ '0': False, '1': True }, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\":", "True } }, \"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\":", "dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051,", "xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200,", "\"ir\": dim1_da(\"ir\", [800, 100, 1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500,", "255 / 2).clip(0, 255) return scaled from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data):", "xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2,", "as used in the Http Response. 
\"mime\": \"image/geotiff\", # The file extension to", "@pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}')", "\"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\",", "}, \"noncontiguous\": { \"description\": \"At least one EO band is missing or saturated\",", "\"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\":", "\"cloud\": False, \"water_observed\": False, } } ] }, { \"components\": { \"red\": {\"bs\":", "{ \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) } },", "\"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\":", "False, }, \"invert\": True, }, ] } ] @pytest.fixture def multi_date_cfg(): return {", "4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def }", "True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\":", "6, \"values\": { '0': False, '1': True }, }, \"water_observed\": { \"description\": \"Classified", "flag_def } } mprod.definition = {\"storage\": {}} if 'nonativecrs' in s: pass elif", "\"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1,", "2)] return { \"lat\": { \"min\": -0.1, \"max\": 0.1, }, \"lon\": { \"min\":", "\"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\":", "False}, \"color\": \"#96966e\", }, ] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [", "{ \"title\": 
\"Terrain Shadow or Low Sun Angle\", \"abstract\": \"\", \"flags\": { \"or\":", "solar incidence angle\", \"bits\": 2, \"values\": { '0': False, '1': True }, },", "in s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in", "as xr from s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da,", "happen. Can't happen. Might happen.\", }, } }) }) return output @pytest.fixture def", "{ \"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\": \"No data\", \"values\": { '0':", "0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\": { '0':", "attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\",", "\"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\": { \"nir\":", "import xarray import xarray as xr from s3fs.core import S3FileSystem from tests.utils import", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\":", "\"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\":", "len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0]", "3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\":", "} }, }, \"scale_range\": (50, 3000), }, ] @pytest.fixture def configs_for_wofs(): return [", "\"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\":", "}, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ] },", "return None mprod = MagicMock() flag_def = { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\":", "try: r = 
requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout -= 0.1 time.sleep(0.1)", "in the Http Response. \"mime\": \"image/geotiff\", # The file extension to add to", "\"black\", }, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\":", "{ \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": {\"blue\": 1.0}, },", "s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001, } else: pass return mprod", "1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000), }, {", "}, { \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\":", "\"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\", },", "conditions hold. \"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\":", "\"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\":", "# Flag rules can contain an \"and\" - they match if all of", "\"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\":", "\"low_solar_angle\": True }, }, \"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\": \"\",", "time from unittest.mock import MagicMock import numpy as np import pytest import requests", "[200, 500, 1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200,", "\"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\":", "dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return", "\"extension\": \"nc\", \"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output", "}, \"ugly\": { \"bits\": 1, \"values\": { '0': 
False, '1': True }, \"description\":", "}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "} } mprod.definition = {\"storage\": {}} if 'nonativecrs' in s: pass elif 'badnativecrs'", "}, \"scale_range\": (200, 1900), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi,", "{ \"nodata\": { \"bits\": 0, \"description\": \"No data\", \"values\": { '0': False, '1':", "} }, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5, \"values\": { '0': False,", "((unscaled + 1.0) * 255 / 2).clip(0, 255) return scaled from datacube_ows.styles.api import", "\"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy looking\"", "get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def", "@pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\",", "True }, }, \"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0': False,", "\"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\":", "# Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] +", "s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\":", "f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\":", "DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the image,", "\"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) }", 
"\"SlateGray\", }, { \"title\": \"Cloud Shadow and High Slope\", \"abstract\": \"\", # Flag", "-1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\", },", "\"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Cloud Shadow\", \"abstract\":", "\"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": {", "pass return mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg =", "\"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ] }, }, ]", "import pytest import requests import xarray import xarray as xr from s3fs.core import", "@pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7,", "255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\",", "{\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\",", "{ \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, {", "\"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\",", "dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\": { '0': \"Melancholic\", '1':", "1900), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0},", "}, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ]", "\"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": 
{\"cloud\":", "{\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, } } @pytest.fixture def", "500, 1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400,", "Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\":", "Can't happen. Might happen.\", }, } }) }) return output def dim1_null_mask(coords): return", "\"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": {", "return parent @pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\",", "(0, 255) } }, }, \"scale_range\": (50, 3000), }, ] @pytest.fixture def configs_for_wofs():", "[800, 100, 1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 200,", "22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\":", "\"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def", "7, \"values\": { '0': False, '1': True }, }, } }) }) return", "ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\",", "return dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title =", "{\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ] } }", "xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"), \"nir\":", "\"http://unsecure.domain.com/odc\", 
\"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\":", "{\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' )", "-0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\",", "Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\":", "if 'nonativeres' in s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\":", "MagicMock() nb = MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = {", "100.0], \"pq_masks\": [ { # Mask out nodata pixels. \"band\": \"water\", \"enum\": 1,", "dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({", "\"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { # WGS-84 \"geographic\":", "\"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\",", "minimal_parent(): parent = MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title", "\"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True, },", "\"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\",", "1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400, 100],", "{\"water_observed\": False}, \"color\": \"Brown\", }, ] } }, { \"name\": \"observations\", \"title\": \"Observations\",", "} @pytest.fixture def 
dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5,", "\"description\": \"Cloud shadow\", \"bits\": 5, \"values\": { '0': False, '1': True }, },", "\"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, } } @pytest.fixture", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\":", "\"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\":", "\"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ]", "{\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def minimal_multiprod_cfg():", "\"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\":", "\"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True,", "{ \"description\": \"Terrain shadow\", \"bits\": 3, \"values\": { '0': False, '1': True },", "}, } global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def minimal_parent():", "of the image, as used in the Http Response. 
\"mime\": \"image/geotiff\", # The", "{\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\": { \"red\":", "\"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\":", "= xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\",", "\"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, {", "{\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), },", "\"and\" - they match if all of the conditions hold. \"flags\": {\"and\": {\"cloud_shadow\":", "xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"),", "NDVI (-1.0 to 1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return", "mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres' in s: pass elif 'nativeres' in", "(0.0, 0.5), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), }, ]", "}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "\"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, },", "(50, 3000), }, { \"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\":", "\"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\": (50, 3000), }, {", "dim1_da(\"ir\", [800, 100, 1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0,", "\"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\":", "-2.0, -3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\", 
[800, 100, 1000, 600, 200, 1000],", "\"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as", "3, \"values\": { '0': False, '1': True }, }, \"high_slope\": { \"description\": \"High", "}, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\":", "\"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, } } @pytest.fixture def minimal_global_raw_cfg(): return", "\"Terrain Shadow or Low Sun Angle\", \"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\":", "\"Aqua\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", },", "{ '0': False, '1': True }, \"description\": \"Real, real ugly\", }, \"impossible\": {", "Response. \"mime\": \"image/geotiff\", # The file extension to add to the filename. \"extension\":", "}) }) return output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture", "\"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {", "\"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000),", "\"and\": { \"water_observed\": True, \"cloud\": True } }, \"color\": \"#bad4f2\", }, { \"title\":", "dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return", "data transparent \"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\":", "1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\":", "\"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Cloud\", \"abstract\":", "elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] 
=", "Open Data Cube project. # See https://opendatacube.org for more information. # # Copyright", "Mask out nodata pixels. \"band\": \"water\", \"enum\": 1, \"invert\": True, }, { #", "\"right\": 0.1, }, } } @pytest.fixture def minimal_global_raw_cfg(): return { \"global\": { \"title\":", "attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\",", "\"s3\", \"-p\", MOTO_PORT]) timeout = 5 while timeout > 0: try: r =", "is the format name, as used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\",", "\"abstract\": \"Observations\", \"value_map\": { \"water\": [ { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\":", "True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\":", "{}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "\"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\":", "or Low Sun Angle\", \"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\":", "xarray as xr from s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords,", "0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1,", "{\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri,", "200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords,", "'1': True }, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3, \"values\": {", "\"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data(): output =", "import Session session = 
Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable", "22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri", "\"description\": \"Won't happen. Can't happen. Might happen.\", }, } }) }) return output", "{ '0': False, '1': True }, }, \"high_slope\": { \"description\": \"High slope\", \"bits\":", "}, \"high_slope\": { \"description\": \"High slope\", \"bits\": 4, \"values\": { '0': False, '1':", "\"scale_range\": (50, 3000), }, ] @pytest.fixture def configs_for_wofs(): return [ { \"name\": \"observations\",", "{ \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ { \"title\":", "\"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, } } ] }, {", "} }, { \"value_map\": { \"water\": [ { # Make noncontiguous data transparent", "noncontiguous data transparent \"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\": True,", "[\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [", "{\"storage\": {}} if 'nonativecrs' in s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] =", "{\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract =", "{ \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None,", "\"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"), \"nir\": dummy_da(2000, \"nir\", xyt_coords, dtype=\"int16\"),", "\"Melancholic\", '1': \"Joyous\", }, 
\"description\": \"All splodgy looking\" }, \"flavour\": { \"bits\": 3,", "\"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\",", "'0': False, '1': True }, }, \"water_observed\": { \"description\": \"Classified as water by", "\"uv\", coords), }) return output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool)", "\"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0, },", "\"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), }, { \"index_function\": {", "\"RdYlBu\", \"range\": [-1.0, 1.0], } ] } xyt_coords = [ (\"x\", [-1.0, -0.5,", "{\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\":", "s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\",", "real ugly\", }, \"impossible\": { \"bits\": 0, \"values\": { '0': False, '1': \"Woah!\"", "3000), }, { \"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\":", "True } }, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\":", "they match if all of the conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\":", "}, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", },", "def minimal_dc(): dc = MagicMock() nb = MagicMock() nb.index = ['band1', 'band2', 'band3',", "requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait()", "1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"swir1\": 1.0}, \"green\":", "import datetime import time from unittest.mock import MagicMock import numpy as np import", "Whether or not the file format supports multiple time slices. \"multi-time\": False },", "0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4},", "} mprod.definition = {\"storage\": {}} if 'nonativecrs' in s: pass elif 'badnativecrs' in", "0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\", } ] }, {", "@pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\",", "datacube_ows.ogc import app with app.test_client() as client: yield client @pytest.fixture def minimal_dc(): dc", "\"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\",", "MagicMock() flag_def = { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2},", "}, \"styling\": { \"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\", \"title\": \"Single Band", "of the conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", },", "minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\",", "\"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\":", "\"bits\": 0, \"description\": \"No data\", \"values\": { '0': False, '1': True }, },", "}, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, } ]", "= { \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\":", "\"Tasty\", }, \"description\": \"All splodgy looking\" }, \"splodgy\": { \"bits\": 2, \"values\": {", "}, } }) }) return output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\":", "{ \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\", }, {", "\"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800), }, { \"components\": { \"red\": {\"red\":", "if either of the conditions hold. 
\"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\":", "\"#f2dcb4\", }, { \"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\":", "}, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"},", "= { \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo =", "dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords =", "None, }, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\":", "{ \"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0': False, '1': True }, },", "} }, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [", "\"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": { \"geographic\": False,", "\\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\":", "MIME type of the image, as used in the Http Response. 
\"mime\": \"image/geotiff\",", "null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\":", "+ 1.0) * 255 / 2).clip(0, 255) return scaled from datacube_ows.styles.api import scalable", "s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass", "\"\", \"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\":", "1, 1), datetime.datetime(2010, 1, 2)] return { \"lat\": { \"min\": -0.1, \"max\": 0.1,", "adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout =", "s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\":", "s: return None mprod = MagicMock() flag_def = { \"moo\": {\"bits\": 0}, \"floop\":", "# Scale to [-1.0 - 1.0] to [0 - 255] scaled = ((unscaled", "{ \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", },", "\"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, },", "{ '0': False, '1': True } }, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\":", "\"color\": \"Beige\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\":", "\"Terrain shadow\", \"bits\": 3, \"values\": { '0': False, '1': True }, }, \"high_slope\":", "np import pytest import requests import xarray import xarray as xr from s3fs.core", "(1500, 3700), }, \"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\":", "# writable local S3 system # adapted from 
https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc =", "} @pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return", "\"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\":", "\"range\": [-1.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\":", "{ \"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0),", "\"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\")", "@pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\"", "water observed \"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\": False, }, \"invert\": True,", "\"color\": \"SaddleBrown\", }, ] } }, { \"value_map\": { \"water\": [ { #", "\"bits\": 4, \"values\": { '0': False, '1': True } }, \"cloud_shadow\": { \"description\":", "'0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All splodgy looking\" }, \"flavour\": { \"bits\":", "{ \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0, \"color\": \"#000000\",", "\"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\",", "1.0, \"scale_range\": (1500, 3700), }, \"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200), },", "dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords,", "session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local S3", "= [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] 
output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800,", "\"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\": { \"red\": {\"bs\": 1.0},", "parent = MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title =", "[-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001,", "= (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale to [-1.0 -", "\"or\" - they match if either of the conditions hold. \"flags\": {\"or\": {\"terrain_shadow\":", "\"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\",", "{\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\",", "False, '1': True }, }, \"high_slope\": { \"description\": \"High slope\", \"bits\": 4, \"values\":", "\"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\", }, {\"value\":", "\"red\"}, }, \"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, {", "output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16),", "\"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": {", "# The MIME type of the image, as used in the Http Response.", "the Http Response. \"mime\": \"image/geotiff\", # The file extension to add to the", "\"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\":", "def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222,", "Data Cube project. 
# See https://opendatacube.org for more information. # # Copyright (c)", "/ (data[\"nir\"] + data[\"red\"]) return [ { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "[ # Cloudy Slopes rule needs to come before the Cloud # and", "\"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}},", "0.3, \"color\": \"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0,", "def get_boto3_client(): from botocore.session import Session session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture", "}, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, \"invert\": True,", "import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session import Session session", "return { \"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\",", "\"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\": {", "\"low_solar_angle\": { \"description\": \"Low solar incidence angle\", \"bits\": 2, \"values\": { '0': False,", "255) } }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\":", "Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\": \"Terrain Shadow", "0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5,", "\"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value = lmo def product_by_name(s): if", "2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\":", "\"tif\", # Whether or not the file format supports multiple time slices. 
\"multi-time\":", "nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail'", "{\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { # Mask out nodata", "\"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, },", "match if either of the conditions hold. \"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}},", "{\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only matches non-cloudy high-slopes. {", "{ \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50,", "\"water\", \"enum\": 1, \"invert\": True, } ] }, { \"components\": { \"red\": {\"bs\":", "\"Real, real ugly\", }, \"impossible\": { \"bits\": 0, \"values\": { '0': False, '1':", "{ \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\": True,", "{\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50, 3000),", "The file extension to add to the filename. 
\"extension\": \"tif\", # Whether or", "global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords =", "\"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\",", "{\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def", "coords, dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\": \"No", "1, \"invert\": True, }, { # Mask out pixels with low_solar_angle, high_slope #", "\"nodata\": True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep", "\"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255)", "Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": {", "\"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ] } xyt_coords = [ (\"x\", [-1.0,", "{ \"min\": -0.1, \"max\": 0.1, }, \"lon\": { \"min\": -0.1, \"max\": 0.1, },", "\"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail' in", "OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import time from unittest.mock import MagicMock", "\"name\": \"band1\", \"title\": \"Single Band Test Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\":", "return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\":", "-0.1, \"left\": -0.1, \"right\": 0.1, }, } } @pytest.fixture def minimal_global_raw_cfg(): return {", "NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"] - 
data[\"red\"]) / (data[\"nir\"] + data[\"red\"])", "\"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock() lmo.loc = { \"foo_nativeres\": nb,", "s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s:", "\"nc\", \"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output =", "coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0]", "{ \"water\": [ { # Make noncontiguous data transparent \"title\": \"\", \"abstract\": \"\",", "}, # Only matches non-cloudy high-slopes. { \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\":", "\"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract =", "'1': \"Splodgy\", }, \"description\": \"All splodgy looking\" }, \"ugly\": { \"bits\": 1, \"values\":", "-0.1, \"max\": 0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\":", "{\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800), },", "600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 200, 200, 700], dim_coords),", "} ] }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\":", "SPDX-License-Identifier: Apache-2.0 import datetime import time from unittest.mock import MagicMock import numpy as", "1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\": { \"red\": {\"bs\":", "Low Sun Angle\", \"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True", "\"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000),", "{ # Mask out pixels with cloud AND no water observed \"band\": \"water\",", "-1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0, 
\"color\": \"#000000\", \"alpha\": 0.0,", "\"water\", \"flags\": { \"cloud\": True, \"water_observed\": False, }, \"invert\": True, }, ] }", "Only matches non-cloudy high-slopes. { \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True},", "\"abstract\": \"\", # Flag rules can contain an \"and\" - they match if", "\"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\",", "True}, \"color\": \"Beige\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True},", "from datacube_ows.ogc import app with app.test_client() as client: yield client @pytest.fixture def minimal_dc():", "\"nodata\": { \"bits\": 0, \"description\": \"No data\", \"values\": { '0': False, '1': True", "\"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [ { \"name\":", "1.0), \"scale_to\": (0, 255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\":", "\"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\": True, \"cloud\":", "\"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\": \"Terrain", "\"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\", }, { \"title\":", "{ # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": {", "\"values\": { '0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All splodgy looking\" }, \"flavour\":", "\"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "}, { \"title\": \"Terrain\", \"abstract\": \"\", # Flag rules can contain an \"or\"", "1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": 
{ \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0,", "one EO band is missing or saturated\", \"bits\": 1, \"values\": { '0': False,", "def configs_for_wofs(): return [ { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": {", "\"water\", \"enum\": 1, \"invert\": True, }, { # Mask out pixels with low_solar_angle,", "\"lon\": { \"min\": -0.1, \"max\": 0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1],", "raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] *", "\"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\":", "\"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\":", "WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, }, }, \"layers\": [] } @pytest.fixture def", "\"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only", "{\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri", "\"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\":", "\"noncontiguous\": { \"description\": \"At least one EO band is missing or saturated\", \"bits\":", "\"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900),", "(0.0, 1.0), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), }, {", "def configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0},", "}, { # Mask 
out pixels with cloud AND no water observed \"band\":", "{ '0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy looking\" }, \"splodgy\": {", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\":", "\"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\":", "splodgy looking\" }, \"flavour\": { \"bits\": 3, \"values\": { '0': \"Bland\", '1': \"Tasty\",", "an \"or\" - they match if either of the conditions hold. \"flags\": {\"or\":", "{\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, {", "\"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" },", "{ \"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\": {", "def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data):", "= \"Global Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs =", "\"#4b4b37\", }, { \"title\": \"Terrain Shadow or Low Sun Angle\", \"abstract\": \"\", \"flags\":", "1.0}, }, \"scale_range\": (200, 1900), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "-0.1, \"right\": 0.1, }, } } @pytest.fixture def minimal_global_raw_cfg(): return { \"global\": {", "\"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { # WGS-84 \"geographic\": True,", "(\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\",", "\"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": 
{\"cloud\":", "3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\":", "\"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\",", "\"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\")", "the Cloud # and High Slopes rules. { \"title\": \"Cloudy Slopes\", \"abstract\": \"\",", "0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\")", "0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ] }, { \"index_function\":", "scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"]", "{ \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\":", "1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255)", "\"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, },", "\"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900), }, {", "@pytest.fixture def dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\",", "\"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud\": True", "Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\":", "global_cfg.contact_position = None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities = { \"auth0\": 
\"http://test.url/auth0\", \"auth1\":", "dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0 to 1.0)", "\"swir2\": 0.5, }, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\":", "\"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": {", "s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3,", "\"description\": \"No data\", \"values\": { '0': False, '1': True }, }, \"noncontiguous\": {", "\"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200),", "\"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI})", "{\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow and", "\"-p\", MOTO_PORT]) timeout = 5 while timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI)", "[{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\", "[ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]),", "0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\",", "True }, }, \"water_observed\": { \"description\": \"Classified as water by the decision tree\",", "1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0,", "\"\", \"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\": 0.0,", "\"wb\") as 
f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573,", "@pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def", "'0': False, '1': True }, }, \"low_solar_angle\": { \"description\": \"Low solar incidence angle\",", "{ \"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud\":", "\"flags\": { \"cloud\": True, \"water_observed\": False, }, \"invert\": True, }, ] } ]", "1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [", "\"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ { \"title\": \"Water\", \"abstract\": \"\", \"flags\":", "True, } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0},", "\"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\":", "\"vertical_coord_first\": True }, }, }, \"layers\": [] } @pytest.fixture def wcs_global_cfg(): return {", "dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900,", "{\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), }, { \"components\": {", "output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\": {", "(-1.0 to 1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [", "[ { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ {", "False, \"cloud_shadow\": False, } }, { # Mask out pixels with cloud AND", "nodata pixels. \"band\": \"water\", \"enum\": 1, \"invert\": True, }, { # Mask out", "datacube-ows, part of the Open Data Cube project. 
# See https://opendatacube.org for more", "\"high_slope\": False, \"cloud_shadow\": False, } }, { # Mask out pixels with cloud", "\"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900), }, { \"components\": { \"red\": {\"red\":", "local S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\",", "\"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] },", "@pytest.fixture def minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\":", "\"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000),", "coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_wo_data():", "as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri,", "700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\",", "}, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ] } xyt_coords = [ (\"x\",", "slope\", \"bits\": 4, \"values\": { '0': False, '1': True } }, \"cloud_shadow\": {", "\"band1\", \"styles\": [ { \"name\": \"band1\", \"title\": \"Single Band Test Style\", \"abstract\": \"\",", "900, 200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111],", "if all of the conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\":", "(1000, 3000), }, { \"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700),", "\"scale_range\": [0.0, 100.0], }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0},", "yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client =", "}, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\":", "}, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ #", "\"description\": \"All splodgy looking\" }, \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\",", "{ \"swir1\": 0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50, 3000), }, { \"components\":", "\"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", },", "output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16),", "\"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ] }, }, ] @pytest.fixture def", "}, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\": {\"bs\": 1.0},", "False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\":", "3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\": {", "output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\":", "return mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock()", "\"vertical_coord\": 
\"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False,", "Mask out pixels with cloud AND no water observed \"band\": \"water\", \"flags\": {", "\"scale_to\": (0, 255) } }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), },", "\"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), },", "coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\":", "{\"green\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0},", "\"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\", },", "0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3, \"color\": \"#703070\", }, {", "happen.\", }, } }) }) return output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({", "None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\":", "100.0], }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\":", "\"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50, 3000), }, {", "\"color\": \"#90FF90\", } ] }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\":", "{\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def } } mprod.definition", "configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"] -", "\"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\":", "with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\", "\"color\": 
\"BurlyWood\", }, # Only matches non-cloudy high-slopes. { \"title\": \"High Slopes\", \"abstract\":", "\"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "\"water\": [ { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", },", "\"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\": 0.0, \"color\":", "{\"high_slope\": True}, \"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\":", "parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return", "can contain an \"or\" - they match if either of the conditions hold.", "\"values\": { '0': False, '1': True }, }, \"high_slope\": { \"description\": \"High slope\",", "1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50, 3000), },", "\"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ] } xyt_coords =", "\"geographic\": True, \"vertical_coord_first\": True }, }, }, \"layers\": [] } @pytest.fixture def wcs_global_cfg():", "\"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, }", "client @pytest.fixture def minimal_dc(): dc = MagicMock() nb = MagicMock() nb.index = ['band1',", "config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3,", "{ \"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\":", "= [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return { \"lat\": { \"min\": -0.1,", "\"values\": { '0': False, '1': True }, }, \"cloud\": { \"description\": \"Cloudy\", \"bits\":", "False, \"gml_name\": 
\"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\":", "\"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\":", "{ \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3},", "more information. # # Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import", "{ # Make noncontiguous data transparent \"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\":", "1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, {", "null}, \\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\":", "@pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\",", "}) return output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords,", "\"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": { \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\":", "}, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}", "{ \"water_observed\": True, \"cloud_shadow\": True } }, \"color\": \"#335277\", }, { \"title\": \"Cloud\",", "\"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\":", "False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\":", "} @pytest.fixture def wcs_global_cfg(): return { \"formats\": { # Key is the format", "True}, \"color\": \"#4b4b37\", }, { \"title\": 
\"Terrain Shadow or Low Sun Angle\", \"abstract\":", "\"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\", }, {\"value\":", "= {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract", "}, ] } ] @pytest.fixture def multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "0, 200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400, 300, 200],", "}, \"description\": \"All splodgy looking\" }, \"ugly\": { \"bits\": 1, \"values\": { '0':", "}, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5, \"values\": { '0': False, '1':", "'1': True }, \"description\": \"Real, real ugly\", }, \"impossible\": { \"bits\": 0, \"values\":", "[0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\":", "{ \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\",", "else: pass if 'nonativeres' in s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] =", "'1': True }, }, \"water_observed\": { \"description\": \"Classified as water by the decision", "# The file extension to add to the filename. \"extension\": \"tif\", # Whether", "supports multiple time slices. \"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\",", "\"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000),", "minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org =", "can contain an \"and\" - they match if all of the conditions hold.", "the conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, {", "slices. \"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\":", "44, 5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\",", "1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {", "4, \"values\": { '0': False, '1': True } }, \"cloud_shadow\": { \"description\": \"Cloud", "\"Cloud shadow\", \"bits\": 5, \"values\": { '0': False, '1': True }, }, \"cloud\":", "0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, }, },", "}, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\":", "project. # See https://opendatacube.org for more information. # # Copyright (c) 2017-2021 OWS", "{ \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock()", "nb = MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\":", "}, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { # Mask out nodata pixels. 
\"band\":", "{ \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\", }, {", "\"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\",", "True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ]", "\"uv\": dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\",", "}, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def minimal_multiprod_cfg(): return {", "{\"red\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": { \"red\":", "{ \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\":", "dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"), \"nir\": dummy_da(2000, \"nir\", xyt_coords,", "dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2,", "}, { \"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\", }", "dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords,", "dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output", "dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\": \"No data\",", "\"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255)", "parent @pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\":", "\"description\": \"All splodgy looking\" }, 
\"ugly\": { \"bits\": 1, \"values\": { '0': False,", "# Key is the format name, as used in DescribeCoverage XML \"GeoTIFF\": {", "}, \"description\": \"Won't happen. Can't happen. Might happen.\", }, } }) }) return", "return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate NDVI", "{\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only matches non-cloudy high-slopes.", "\"description\": \"Low solar incidence angle\", \"bits\": 2, \"values\": { '0': False, '1': True", "except: pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch):", "True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\": { \"geographic\":", "\"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0,", "}, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0},", "\"Classified as water by the decision tree\", \"bits\": 7, \"values\": { '0': False,", "}, \"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\":", "\"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"},", "\"pq_masks\": [ { \"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False,", "{ \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } },", "\"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\": { \"and\": {", "return { \"formats\": { # Key is the format name, as used in", "3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\": 
{\"include\": \"simple.json\", \"type\":", "\"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True", "# SPDX-License-Identifier: Apache-2.0 import datetime import time from unittest.mock import MagicMock import numpy", "dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data):", "Shadow and High Slope\", \"abstract\": \"\", # Flag rules can contain an \"and\"", "\"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\":", "saturated\", \"bits\": 1, \"values\": { '0': False, '1': True }, }, \"low_solar_angle\": {", "-0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\",", "-3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords,", "datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"),", "1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 200, 200, 700],", "rules can contain an \"and\" - they match if all of the conditions", "\"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), },", "\"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\", # Flag rules can contain", "xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500,", "\"abstract\": \"Observations\", \"value_map\": { \"water\": [ # Cloudy Slopes rule needs to come", "'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\",", "needs to come before the 
Cloud # and High Slopes rules. { \"title\":", "}, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {", "{ \"description\": \"Low solar incidence angle\", \"bits\": 2, \"values\": { '0': False, '1':", "monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\":", "def dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords),", "\"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open:", "(0, 255) } }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, {", "coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session import Session session = Session() return", "0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\": {", "yield client @pytest.fixture def minimal_dc(): dc = MagicMock() nb = MagicMock() nb.index =", "\"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, }, }, \"layers\": []", "3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All splodgy looking\" },", "\"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\":", "# or cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\":", "{\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000), },", "to the filename. 
\"extension\": \"tif\", # Whether or not the file format supports", "mprod = MagicMock() flag_def = { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\":", "255) return scaled from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): # Calculate NDVI", "'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"), \"band4\":", "f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\":", "{ \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False,", "\"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False,", "of the conditions hold. \"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", },", "\"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\":", "1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), }, {", "\"enum\": 1, \"invert\": True, } ] }, { \"components\": { \"red\": {\"bs\": 1.0},", "1.0}, }, \"scale_range\": (1000, 8000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\":", "{ \"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\":", "\"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\":", "\"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ] } }, { \"name\": \"observations\", 
\"title\":", "}, {\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5,", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": {", "[-1.0 - 1.0] to [0 - 255] scaled = ((unscaled + 1.0) *", "{ \"high_slope\": True, \"cloud\": True } }, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy", "False, '1': True }, }, \"low_solar_angle\": { \"description\": \"Low solar incidence angle\", \"bits\":", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\":", "\"High slope\", \"bits\": 4, \"values\": { '0': False, '1': True } }, \"cloud_shadow\":", "@pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache()", "(data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [ { \"components\": { \"red\":", "https://opendatacube.org for more information. # # Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier:", "all of the conditions hold. \"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\",", "\"latitude\": 0.001, \"longitude\": 0.001, } else: pass return mprod dc.index.products.get_by_name = product_by_name return", "'1': True }, }, \"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0':", "\"yes\") from datacube_ows.ogc import app with app.test_client() as client: yield client @pytest.fixture def", "out nodata pixels. 
\"band\": \"water\", \"enum\": 1, \"invert\": True, }, { # Mask", "xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"), \"nir\": dummy_da(2000, \"nir\", xyt_coords, dtype=\"int16\"), })", "\"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\":", "s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres' in s: pass elif 'nativeres'", "1.0}, }, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def mock_range(): times", "\"bits\": 3, \"values\": { '0': False, '1': True }, }, \"high_slope\": { \"description\":", "\"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0},", "{ \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000),", "3700), }, \"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0},", "Cube project. # See https://opendatacube.org for more information. 
# # Copyright (c) 2017-2021", "1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, },", "True }, }, } }) }) return output @pytest.fixture def dummy_raw_fc_data(): output =", "EO band is missing or saturated\", \"bits\": 1, \"values\": { '0': False, '1':", "s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri =", "\"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, } } ]", "Angle\", \"abstract\": \"\", \"flags\": { \"or\": { \"terrain_shadow\": True, \"low_solar_angle\": True }, },", "with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def", "splodgy looking\" }, \"ugly\": { \"bits\": 1, \"values\": { '0': False, '1': True", "\"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\": { \"nir\": 1.0, \"scale_range\":", "}, \"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\",", "[ { \"band\": \"water\", \"enum\": 1, } ] }, { \"components\": { \"red\":", "'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\"", "global_cfg.published_CRSs = { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\":", "}, }, \"layers\": [] } @pytest.fixture def wcs_global_cfg(): return { \"formats\": { #", "\"description\": \"At least one EO band is missing or saturated\", \"bits\": 1, \"values\":", "\"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\":", "}, { \"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True,", 
"\"min\": -0.1, \"max\": 0.1, }, \"lon\": { \"min\": -0.1, \"max\": 0.1, }, \"times\":", "{ \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0],", "0.0, }, { \"value\": 0.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.1,", "{ \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0,", "True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only matches non-cloudy high-slopes. { \"title\":", "] } xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0,", "This file is part of datacube-ows, part of the Open Data Cube project.", "s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri", "{ \"title\": \"Terrain\", \"abstract\": \"\", # Flag rules can contain an \"or\" -", "1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, }", "dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords,", "+ data[\"red\"]) # Scale to [-1.0 - 1.0] to [0 - 255] scaled", "500, 0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100,", "1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), }, {", "{\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": {", "s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\"", "[-1.0, 1.0], } ] } xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0, 0.5,", "# Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { #", "}, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, }, }, \"scale_range\":", 
"1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { # Mask", "200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\",", "output def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords", "-1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010,", "True, \"water_observed\": False, }, \"invert\": True, }, ] } ] @pytest.fixture def multi_date_cfg():", ") @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with app.test_client() as", "client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri,", "Flag rules can contain an \"or\" - they match if either of the", "\"Cloud Shadow and High Slope\", \"abstract\": \"\", # Flag rules can contain an", "\"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16),", "\"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1,", "100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400, 100], dim_coords), \"pq\":", "True } }, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\":", "\"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "= 5 while timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break", "\"values\": { '0': False, '1': True } }, \"cloud_shadow\": { \"description\": \"Cloud shadow\",", "False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index = { 
\"folder.existing_folder\": MagicMock(), }", "1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True,", "as np import pytest import requests import xarray import xarray as xr from", "def multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"},", "-0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1,", "Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\":", "* len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0,", "{ \"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200,", "1024] } ] } } @pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The Title\",", "datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0) return", "- data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale to [-1.0 - 1.0] to", "dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000,", "\"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ] }, }, ] @pytest.fixture", "MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position =", "\"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index = { \"folder.existing_folder\":", "proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5 while timeout > 0:", "{\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), },", "}, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy 
Water\", \"abstract\": \"\", \"flags\": { \"and\":", "0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": {", "False, '1': True } }, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5, \"values\":", "nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb,", "[ { \"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False, \"terrain_shadow\": False, \"low_solar_angle\":", "{\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { #", "(50, 3000), }, { \"components\": { \"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\":", "output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"red\": 1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\":", "import numpy as np import pytest import requests import xarray import xarray as", "global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None", "\"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255)", "f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri,", "}, }, \"noncontiguous\": { \"description\": \"At least one EO band is missing or", "return output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16),", "{\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\":", "MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session import Session 
session = Session()", "\"Global Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = {", "Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"])", "-0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1,", "\"band\": \"water\", \"enum\": 1, \"invert\": True, }, { # Mask out pixels with", "\"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\",", "\"high_slope\": True, \"cloud\": True } }, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy Water\",", "= \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract = \"Global Abstract\"", "\"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\", } ] }, { \"components\":", "\"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index = {", "with app.test_client() as client: yield client @pytest.fixture def minimal_dc(): dc = MagicMock() nb", "3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {}, },", "@pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output =", "f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\": {\"include\":", "{\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\": 4}, \"dang\": {\"bits\":", "\"extension\": \"tif\", # Whether or not the file format supports multiple time slices.", "\"\", # Flag rules can contain an \"and\" - they match if all", "def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": 
\"simple.json\",", "{ # WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, }, }, \"layers\": [] }", "{\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\": \"Terrain Shadow or Low Sun Angle\",", "400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={", "'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001, } else: pass", "coords), }) return output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture", "-0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1,", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0,", "lmo = MagicMock() lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\":", "}, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": { \"red\": 0.333, \"green\":", "\"color\": \"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\":", "{ \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": {", "\"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}'", "\"impossible\": { \"bits\": 0, \"values\": { '0': False, '1': \"Woah!\" }, \"description\": \"Won't", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0,", "}, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, {", "s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222,", 
"\"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16),", "\"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\")", "{ \"band4\": { \"flags_definition\": flag_def } } mprod.definition = {\"storage\": {}} if 'nonativecrs'", "{\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\":", "\"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\":", "] } } @pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1,", "decision tree\", \"bits\": 7, \"values\": { '0': False, '1': True }, }, }", "- 1.0] to [0 - 255] scaled = ((unscaled + 1.0) * 255", "False, \"cloud\": False, \"water_observed\": False, } } ] }, { \"components\": { \"red\":", "\"description\": \"Real, real ugly\", }, \"impossible\": { \"bits\": 0, \"values\": { '0': False,", "Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", },", "observed \"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\": False, }, \"invert\": True, },", "{ \"band\": \"water\", \"enum\": 1, \"invert\": True, } ] }, { \"components\": {", "\"layers\": [] } @pytest.fixture def wcs_global_cfg(): return { \"formats\": { # Key is", "\"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), }, { \"components\": { \"red\": {\"red\":", "\"alpha\": 0.0, }, { \"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\":", "\"min\": -0.1, \"max\": 0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times),", 
"f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri =", "\"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, },", "\"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", },", "False, '1': True }, \"description\": \"Real, real ugly\", }, \"impossible\": { \"bits\": 0,", "False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, } }", "\"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\": False, }, \"invert\": True, }, ]", "= xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\": { \"nodata\":", "[0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\": False,", "return [ { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [", "\"bits\": 7, \"values\": { '0': False, '1': True }, }, } }) })", "def dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({", "\"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\":", "pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"]", "{ \"min\": -0.1, \"max\": 0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\":", "def dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords =", "\"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\",", 
"\"water_observed\": True, \"cloud\": True } }, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\",", "\"Woah!\" }, \"description\": \"Won't happen. Can't happen. Might happen.\", }, } }) })", "\"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, \"invert\": True, } ] }, {", "# Flag rules can contain an \"or\" - they match if either of", "with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple):", "0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1,", "{ \"description\": \"At least one EO band is missing or saturated\", \"bits\": 1,", "}, \"scale_range\": (1000, 3000), }, { \"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\":", "}, { \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True,", "Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\":", "{ \"value\": 1.0, \"color\": \"#90FF90\", } ] }, { \"components\": { \"red\": {\"red\":", "}, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, }, }, \"layers\":", "\"values\": { '0': False, '1': \"Woah!\" }, \"description\": \"Won't happen. Can't happen. Might", "[-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [", "nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value = lmo", "{ # Mask out nodata pixels. 
\"band\": \"water\", \"enum\": 1, \"invert\": True, },", "False, '1': True }, }, \"noncontiguous\": { \"description\": \"At least one EO band", "{ \"terrain_shadow\": True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\", }, { \"title\": \"Steep", "\"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3,", "\"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\",", "True, \"nodata\": True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy", "import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from", "\"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return global_cfg", "]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\": dummy_da(1400, \"red\", xyt_coords, dtype=\"int16\"), \"green\":", "\"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, } } ] }, { \"components\": {", "dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": {", "\"water_observed\": False, } } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\":", "'0': False, '1': True }, }, } }) }) return output @pytest.fixture def", "\"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output @pytest.fixture def", "nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999,", "}, } } @pytest.fixture def minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test Title\",", "{\"bits\": 4}, \"dang\": {\"bits\": 5}, } mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def", "\"#005050\", }, 
{\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\":", "\"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", },", "{\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\":", "1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [ { \"components\":", "\"flags_definition\": { \"nodata\": { \"bits\": 0, \"description\": \"No data\", \"values\": { '0': False,", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\":", "\"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0, \"color\": \"#000000\", \"alpha\":", "{ \"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, },", "pixels with cloud AND no water observed \"band\": \"water\", \"flags\": { \"cloud\": True,", "\"water\": [ { # Make noncontiguous data transparent \"title\": \"\", \"abstract\": \"\", \"flags\":", "Scale to [-1.0 - 1.0] to [0 - 255] scaled = ((unscaled +", "\"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { #", "\"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { # Web Mercator", "{\"water_observed\": False}, \"color\": \"#96966e\", }, ] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return", "False, '1': True }, }, \"water_observed\": { \"description\": \"Classified as water by the", "True, \"low_solar_angle\": True }, }, \"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\":", "\"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\": {\"green\": 1.0}, }, 
\"scale_range\":", "client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3", "[ { \"name\": \"band1\", \"title\": \"Single Band Test Style\", \"abstract\": \"\", \"components\": {", "{\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1,", "output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat():", "[-1.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"},", "\"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False, }", "the image, as used in the Http Response. 
\"mime\": \"image/geotiff\", # The file", "}, \"scale_range\": (50, 3000), }, ] @pytest.fixture def configs_for_wofs(): return [ { \"name\":", "\"enum\": 1, \"invert\": True, }, { # Mask out pixels with low_solar_angle, high_slope", "\"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def", "dim1_da(\"uv\", [400, 600, 900, 200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010,", "= S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\"", "{\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024] }", "import requests import xarray import xarray as xr from s3fs.core import S3FileSystem from", "\"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\": True }", "\"enum\": 1, } ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\":", "hold. 
\"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud", "\\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from", "2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\":", "return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0,", "dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords,", "True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, }, \"EPSG:3577\":", "\"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, } ] },", "}, ] } }, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": {", "{ \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb,", "(50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi,", "[ { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}},", "}, }, \"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0': False, '1':", "\"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Cloud\",", "\"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1,", "dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\":", "dtype=np.int16), }) return output @pytest.fixture def 
dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\")", "-0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1,", "@pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\":", "{ '0': False, '1': True }, }, \"low_solar_angle\": { \"description\": \"Low solar incidence", "}, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def mock_range(): times =", "{ \"bits\": 1, \"values\": { '0': False, '1': True }, \"description\": \"Real, real", "is part of datacube-ows, part of the Open Data Cube project. # See", "coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return", "app with app.test_client() as client: yield client @pytest.fixture def minimal_dc(): dc = MagicMock()", "\"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\":", "255) } }, }, \"scale_range\": (50, 3000), }, ] @pytest.fixture def configs_for_wofs(): return", "Terrain\", \"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\": True, \"cloud\": True } },", "\"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\", } ] }, { \"components\": {", "\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\":", "{ \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} },", "}, \"description\": \"All splodgy looking\" }, \"flavour\": { \"bits\": 3, \"values\": { '0':", "-1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def", "\"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, 
\"npv\", coords, dtype=np.int16),", "} } @pytest.fixture def minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test Title\", \"info_url\":", "0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2,", "# Only matches non-cloudy high-slopes. { \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\":", "\"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ] }", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {\"value\": -1.0,", "\"scale_range\": (1000, 3000), }, { \"components\": { \"red\": { \"swir1\": 1.0, \"scale_range\": (1500,", "{\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), }, { \"components\": { \"red\": {\"red\": 1.0},", "} ] } xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"y\",", "dim1_da(\"red\", [200, 500, 0, 200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0,", "\"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50,", "-999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock() lmo.loc = { \"foo_nativeres\":", "\"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, },", "\"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000),", "\"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow", "# Calculate NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"]", "['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\": -999, \"band3\": float(\"nan\"),", "{ \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ # Cloudy", "def scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"] 
- data[\"red\"]) /", "}) }) return output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\",", "unscaled = (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale to [-1.0", "} }, }, \"scale_range\": (50, 3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\":", "\"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": { \"function\":", "'nonativecrs' in s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs'", "}, \"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\",", "\"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow and High Slope\", \"abstract\": \"\", #", "\"cloud_shadow\": True } }, \"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\":", "{ \"description\": \"Cloud shadow\", \"bits\": 5, \"values\": { '0': False, '1': True },", "{\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": {", "\"abstract\": \"\", \"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\",", "monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache()", "False, \"terrain_shadow\": False, \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, \"cloud\": False, \"water_observed\": False,", "{ \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [", "{ \"description\": \"High slope\", \"bits\": 4, \"values\": { '0': False, '1': True }", "\"npv\", 
coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data,", "subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5 while timeout > 0: try: r", "0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data():", "} }, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy Water\", \"abstract\": \"\", \"flags\": {", "[ { \"band\": \"water\", \"enum\": 1, \"invert\": True, } ] }, { \"components\":", "\"max\": 0.1, }, \"lon\": { \"min\": -0.1, \"max\": 0.1, }, \"times\": times, \"start_time\":", "\"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, } }, {", "= { \"latitude\": 0.001, \"longitude\": 0.001, } else: pass return mprod dc.index.products.get_by_name =", "@pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output =", "dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\",", "\"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\", },", "\"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, } }, { # Mask out pixels", "return output @pytest.fixture def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data():", "\"color\": \"#4b4b37\", }, { \"title\": \"Terrain Shadow or Low Sun Angle\", \"abstract\": \"\",", "-0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now()", "contain an \"or\" - they match if either of the conditions hold. 
\"flags\":", "datetime.datetime(2010, 1, 2)] return { \"lat\": { \"min\": -0.1, \"max\": 0.1, }, \"lon\":", "\"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\":", "import scalable @scalable def scaled_ndvi(data): # Calculate NDVI (-1.0 to 1.0) return (data[\"nir\"]", "\"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\":", "\"title\": \"Water\", \"abstract\": \"\", \"flags\": { \"water_observed\": True, }, \"color\": \"#4f81bd\", }, {", "monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False,", "{ \"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\", } ]", "0.1, }, \"lon\": { \"min\": -0.1, \"max\": 0.1, }, \"times\": times, \"start_time\": times[0],", "} return global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract = \"Parent Abstract\"", "\"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\": \"Terrain Shadow or", "1.0], } ] } xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0, 0.5, 1.0]),", "} dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail' in s: return None mprod", "pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001, }", "\"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0},", "Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None,", "}, }, \"scale_range\": (50, 3000), }, ] 
@pytest.fixture def configs_for_wofs(): return [ {", "low_solar_angle, high_slope # or cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\":", "}, { \"value\": 1.0, \"color\": \"#90FF90\", } ] }, { \"components\": { \"red\":", "\"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\":", "\"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5, \"values\": { '0': False, '1': True", "{ '0': False, '1': \"Woah!\" }, \"description\": \"Won't happen. Can't happen. Might happen.\",", "\"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\",", "\"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\",", "\"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", }, { \"title\": \"Cloud", "] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\":", "dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords),", "\"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with", "\"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3, \"color\": \"#703070\", }, { \"value\": 0.6,", "\"EPSG:4326\" else: pass if 'nonativeres' in s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"]", "\"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 0.5), \"scale_to\": (0,", "3200), }, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (200, 1900), }, { \"components\": {", "\"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": 
\"Cloud\", \"abstract\": \"\",", "1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), }, { \"components\": { \"red\":", "\"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001,", "come before the Cloud # and High Slopes rules. { \"title\": \"Cloudy Slopes\",", "Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\":", "shadow\", \"bits\": 5, \"values\": { '0': False, '1': True }, }, \"cloud\": {", "\"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple):", "the decision tree\", \"bits\": 7, \"values\": { '0': False, '1': True }, },", "\"things\": [{\"test\": 2572, \"thing\": null}, \\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}},", "9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\":", "\"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\":", "parent.attribution.title = \"Parent Attribution\" return parent @pytest.fixture def minimal_layer_cfg(): return { \"title\": \"The", "}, \"flavour\": { \"bits\": 3, \"values\": { '0': \"Bland\", '1': \"Tasty\", }, \"description\":", "\"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\",", "\"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\": False,", "[-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) 
@pytest.fixture", "[ { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0},", "High Slopes rules. { \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True,", "False, '1': True }, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3, \"values\":", "3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "\"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\": [2,", "{\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import", "Mask out pixels with low_solar_angle, high_slope # or cloud shadow. \"band\": \"water\", \"flags\":", "for more information. # # Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0", "def ndvi(data): # Calculate NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"] - data[\"red\"])", "\"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False},", "\"cloud\": True } }, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\": \"\",", "= MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999,", "True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\",", "'1': \"Tasty\", }, \"description\": \"All splodgy looking\" }, \"splodgy\": { \"bits\": 2, \"values\":", "\"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ {", "pytest import requests import xarray import xarray as xr from s3fs.core import S3FileSystem", "}, { \"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3, \"color\":", 
"\"flags\": { \"and\": { \"water_observed\": True, \"cloud\": True } }, \"color\": \"#bad4f2\", },", "Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local S3 system #", "an \"and\" - they match if all of the conditions hold. \"flags\": {\"and\":", "\"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\":", "0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\":", "\"water\": [ # Cloudy Slopes rule needs to come before the Cloud #", "5}, } mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def } } mprod.definition =", "Cloudy Slopes rule needs to come before the Cloud # and High Slopes", "'1': True } }, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5, \"values\": {", "(data[\"nir\"] + data[\"red\"]) # Scale to [-1.0 - 1.0] to [0 - 255]", "\"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1,", "0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, }", "\"joviality\": { \"bits\": 3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All", "88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with", "\"values\": { '0': False, '1': True }, }, \"water_observed\": { \"description\": \"Classified as", "\"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\": { \"function\":", "\"#96966e\", }, ] }, }, ] @pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\":", "[True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0,", "s3_config_simple, 
s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\":", "the file format supports multiple time slices. \"multi-time\": False }, \"netCDF\": { \"renderer\":", "\"flavour\": { \"bits\": 3, \"values\": { '0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All", "{ '0': False, '1': True }, }, \"noncontiguous\": { \"description\": \"At least one", "\"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Cloud Shadow\",", "False}, \"color\": \"Brown\", }, ] } }, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\":", "{\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\":", "\"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output", "}, { \"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0},", "[400, 600, 900, 200, 400, 100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011,", "filename. \"extension\": \"tif\", # Whether or not the file format supports multiple time", "} else: pass return mprod dc.index.products.get_by_name = product_by_name return dc @pytest.fixture def minimal_global_cfg():", "-1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000,", "\"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\":", "{\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0},", "}, { \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"#776857\", },", "before the Cloud # and High Slopes rules. 
{ \"title\": \"Cloudy Slopes\", \"abstract\":", "{\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564, \"thing\": {\"include\": \"simple.json\",", "1.0}, }, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def minimal_multiprod_cfg(): return", "} }, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3,", "\"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri,", "from unittest.mock import MagicMock import numpy as np import pytest import requests import", "{ \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ] },", "= \"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres'", "dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400,", "{ \"description\": \"Classified as water by the decision tree\", \"bits\": 7, \"values\": {", "high_slope # or cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False,", "{\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800), }, { \"components\": {", "{\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\": 3}, \"zap\": {\"bits\":", "\"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\":", "{\"value\": 1.0, \"color\": \"#FF9090\", }, ] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "\"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\", \"alpha\": 0.0, }, { \"value\": 0.0,", "0, \"values\": { '0': False, '1': \"Woah!\" }, \"description\": \"Won't happen. 
Can't happen.", "{ \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract", "{ \"bits\": 3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All splodgy", "Can't happen. Might happen.\", }, } }) }) return output @pytest.fixture def dummy_raw_ls_data():", "False, '1': True }, }, \"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\": {", "datetime import time from unittest.mock import MagicMock import numpy as np import pytest", "to 1.0) unscaled = (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale", "= lmo def product_by_name(s): if 'lookupfail' in s: return None mprod = MagicMock()", "{\"green\": 1.0}, }, \"scale_range\": (200, 1900), }, { \"components\": { \"red\": {\"red\": 1.0},", "\"color\": \"#FF9090\", }, ] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\",", "\"scale_range\": (1000, 8000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0},", "}, \"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0': False, '1': True", "{ '0': \"Melancholic\", '1': \"Joyous\", }, \"description\": \"All splodgy looking\" }, \"flavour\": {", "S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\",", "\"range\": [-1.0, 1.0], } ] } xyt_coords = [ (\"x\", [-1.0, -0.5, 0.0,", "\"preserve_user_date_order\": True, \"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], }", "500, 0, 200, 200, 700], dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400, 300,", "MagicMock import numpy as np import pytest import requests import xarray import xarray", "\"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": 
\"#776857\", }, { \"title\":", "dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs = { \"flags_definition\":", "}, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\":", "{ \"and\": { \"water_observed\": True, \"cloud\": True } }, \"color\": \"#bad4f2\", }, {", "-3.0] return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0,", "coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), })", "\"description\": \"Terrain shadow\", \"bits\": 3, \"values\": { '0': False, '1': True }, },", "looking\" }, \"flavour\": { \"bits\": 3, \"values\": { '0': \"Bland\", '1': \"Tasty\", },", "{ \"value_map\": { \"water\": [ { # Make noncontiguous data transparent \"title\": \"\",", "\"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture def", "\"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0': False, '1': True }, }, \"water_observed\":", "{ '0': False, '1': True }, }, } }) }) return output @pytest.fixture", "\"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\",", "numpy as np import pytest import requests import xarray import xarray as xr", "\"max\": 0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": {", "{\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' )", "0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000, 600,", "\"title\": \"Terrain\", \"abstract\": \"\", # Flag rules can contain an \"or\" - they", "-1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\": 
dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100,", "1.0}, \"blue\": {\"red\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "\"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\":", "= \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\":", "0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now() ]) ]", "\"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\":", "# Mask out pixels with low_solar_angle, high_slope # or cloud shadow. \"band\": \"water\",", "False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\":", "True, \"cloud_shadow\": True } }, \"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\",", "\"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\":", "1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture", "dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global", "f_open.write(b'{\"test\": 1234}') @pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as", "{ \"flags_definition\": flag_def } } mprod.definition = {\"storage\": {}} if 'nonativecrs' in s:", "configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\":", "\"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), }, ] @pytest.fixture def", 
"255] scaled = ((unscaled + 1.0) * 255 / 2).clip(0, 255) return scaled", "{\"value\": 0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\":", "Slopes rule needs to come before the Cloud # and High Slopes rules.", "f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\",", "{\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\":", "\"value\": 0.6, \"color\": \"#e0e070\", }, { \"value\": 1.0, \"color\": \"#90FF90\", } ] },", "\\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\":", "-0.5, 0.0, 0.5, 1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021,", "\"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\",", "def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364,", "True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\":", "r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout -= 0.1 time.sleep(0.1) yield", "dtype=np.int16), \"green\": dummy_da(7, \"green\", coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101,", "{\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\", # Flag rules", "\"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\": \"Terrain Shadow or Low Sun", "\"Beige\", }, { \"title\": \"Terrain\", \"abstract\": \"\", # Flag rules can contain an", "\"abstract\": \"\", \"flags\": {\"cloud\": True}, 
\"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\":", "0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700],", "\"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\": None, },", "\"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\":", "400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700], dim_coords),", "True }, }, \"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\":", "\"value_map\": { \"water\": [ # Cloudy Slopes rule needs to come before the", "[ { # Mask out nodata pixels. \"band\": \"water\", \"enum\": 1, \"invert\": True,", "= MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org = None global_cfg.contact_position", "-2.0, -3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111],", "{ # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\":", "{ \"lat\": { \"min\": -0.1, \"max\": 0.1, }, \"lon\": { \"min\": -0.1, \"max\":", "1.0) unscaled = (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) # Scale to", "\"color\": \"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\",", "\"flags\": {\"high_slope\": True}, \"color\": \"#776857\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {", "\"description\": \"High slope\", \"bits\": 4, \"values\": { '0': False, '1': True } },", "\"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return output", "Cloud # and High Slopes rules. 
{ \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\":", "{\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\":", "def null_mask(): return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0,", "\"red\", xyt_coords, dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"),", "\"components\": { \"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\":", "\"alpha\": 1.0, }, { \"value\": 0.3, \"color\": \"#703070\", }, { \"value\": 0.6, \"color\":", "'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if 'nonativeres' in s: pass", "to 1.0) return (data[\"nir\"] - data[\"red\"]) / (data[\"nir\"] + data[\"red\"]) return [ {", "1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "}, ], \"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\": True, \"aggregator_function\": { \"function\":", "\"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\": \"black\", },", "{ \"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } },", "'0': False, '1': True } }, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5,", "s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\":", "\"\", \"flags\": { \"and\": { \"high_slope\": True, \"cloud\": True } }, \"color\": \"#f2dcb4\",", "}, \"lon\": { \"min\": -0.1, \"max\": 0.1, }, \"times\": times, \"start_time\": times[0], \"end_time\":", "not the file format supports multiple time slices. 
\"multi-time\": False }, \"netCDF\": {", "1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "\"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ] } }, {", "mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def } } mprod.definition = {\"storage\": {}}", "}, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": {", "\"title\": \"Cloud Shadow and High Slope\", \"abstract\": \"\", # Flag rules can contain", "\"Brown\", }, ] } }, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\":", "= \"Parent Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return parent", "dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\": dummy_da(7, \"green\", coords,", "= [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000,", "looking\" }, \"ugly\": { \"bits\": 1, \"values\": { '0': False, '1': True },", "= { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": { #", "}, { \"value_map\": { \"water\": [ { # Make noncontiguous data transparent \"title\":", "\"scale_range\": [0, 1024] } ] } } @pytest.fixture def minimal_multiprod_cfg(): return { \"title\":", "def product_by_name(s): if 'lookupfail' in s: return None mprod = MagicMock() flag_def =", "\"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, {", "non-cloudy high-slopes. 
{ \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\",", "\"\", \"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\": True, }, },", "\"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ] }, { \"index_function\": {", "0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3,", "s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 1234}') @pytest.fixture def", "dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\": dummy_da(1051, \"swir2\", coords, dtype=np.int16), }) return output @pytest.fixture", "\"band2\": -999, \"band3\": float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock() lmo.loc = {", "0.5), \"scale_to\": (0, 255) } }, }, \"scale_range\": (50, 3000), }, ] @pytest.fixture", "from s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def", "(\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22, 44,", "Band Test Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0},", "\"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\":", "XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the image, as", "\"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture", "}, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, } }", "'1': \"Joyous\", }, \"description\": \"All splodgy looking\" }, \"flavour\": { \"bits\": 3, \"values\":", "def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16), \"pv\": dummy_da(723, \"pv\",", "\"swir1\": 
0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50, 3000), }, { \"components\": {", "shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, } },", "(MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session import Session session =", "\"band4\": { \"flags_definition\": flag_def } } mprod.definition = {\"storage\": {}} if 'nonativecrs' in", "add to the filename. \"extension\": \"tif\", # Whether or not the file format", "(50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\",", "600, 100, 700], dim_coords), \"uv\": dim1_da(\"uv\", [400, 600, 900, 200, 400, 100], dim_coords),", "dim_coords, attrs={ \"flags_definition\": { \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1':", "times[0], \"end_time\": times[-1], \"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\":", "{ \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None,", "\"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud\": True } }, \"color\":", "0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\":", "conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}}, \"color\": \"DarkKhaki\", }, { \"title\":", "\"cloud_shadow\": False, } }, { # Mask out pixels with cloud AND no", "/ (data[\"nir\"] + data[\"red\"]) # Scale to [-1.0 - 1.0] to [0 -", "= { \"moo\": {\"bits\": 0}, \"floop\": {\"bits\": 1}, \"blat\": {\"bits\": 2}, \"pow\": {\"bits\":", "}, } }) }) return output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\":", "1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000), }, {", "[ { # Make noncontiguous data transparent \"title\": \"\", \"abstract\": \"\", \"flags\": {", "\"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\":", "1, 1, 22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({", "transparent \"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\": { \"noncontiguous\": True, \"nodata\": True,", "\"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ # Cloudy Slopes", "\"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ] }, },", "# WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/4326\", \"alias_of\":", "}, \"native_format\": \"GeoTIFF\", } @pytest.fixture def dummy_raw_data(): output = xr.Dataset({ \"ir\": dummy_da(3, \"ir\",", "coords, dtype=np.int16), \"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\":", "'0': False, '1': True }, }, \"high_slope\": { \"description\": \"High slope\", \"bits\": 4,", "dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = 
[-2.0, -1.0, 0.0,", "False}, \"color\": \"SaddleBrown\", }, ] } }, { \"value_map\": { \"water\": [ {", "cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\": False, \"high_slope\": False, \"cloud_shadow\": False, }", "incidence angle\", \"bits\": 2, \"values\": { '0': False, '1': True }, }, \"terrain_shadow\":", "True }, }, \"high_slope\": { \"description\": \"High slope\", \"bits\": 4, \"values\": { '0':", "return global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords", "\"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the image, as used in the", "1.0]), (\"y\", [-1.0, -0.5, 0.0, 0.5, 1.0]), (\"time\", [ datetime.datetime(2021, 1, 1, 22,", "[0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, \"invert\": True, } ]", "@pytest.fixture def s3_config_nested_1(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\":", "in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else:", "\"values\": { '0': False, '1': True }, }, } }) }) return output", "\"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\":", "\"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\":", "\"scale_range\": (200, 1900), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\":", "] } }, { \"value_map\": { \"water\": [ { # Make noncontiguous data", "\"foo_nativecrs\": nb, \"foo_nonativecrs\": nb, \"foo\": nb, \"bar\": nb, } dc.list_measurements.return_value = lmo def", "@pytest.fixture def configs_for_combined_fc_wofs(): return [ { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\":", "= \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: 
f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def", "as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\"", "from botocore.session import Session session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base():", "\"time_set\": set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1,", "output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5, \"red\", coords, dtype=np.int16), \"green\":", "\"Observations\", \"value_map\": { \"water\": [ # Cloudy Slopes rule needs to come before", "minimal_dc(): dc = MagicMock() nb = MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4']", "\"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, } global_cfg.folder_index", "\"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only matches non-cloudy", "as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri", "with cloud AND no water observed \"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\":", "\"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0, 100.0], }, { \"components\": {", "= ((unscaled + 1.0) * 255 / 2).clip(0, 255) return scaled from datacube_ows.styles.api", "session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local S3 system # adapted from", "\"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ # Cloudy Slopes rule", "\"Cloud Shadow\", 
\"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\",", "\"bar\": nb, } dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail' in s: return", "{\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"swir1\": 1.0},", "multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, },", "import MagicMock import numpy as np import pytest import requests import xarray import", "contain an \"and\" - they match if all of the conditions hold. \"flags\":", "product_by_name(s): if 'lookupfail' in s: return None mprod = MagicMock() flag_def = {", "}, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0},", "the Open Data Cube project. # See https://opendatacube.org for more information. # #", "angle\", \"bits\": 2, \"values\": { '0': False, '1': True }, }, \"terrain_shadow\": {", "{\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [", "\"simple.json\", \"type\": \"json\"}') @pytest.fixture def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\")", "part of the Open Data Cube project. 
# See https://opendatacube.org for more information.", "{ \"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\": { \"nir\": 1.0, \"scale_range\": (1600,", "0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1,", "mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return { \"lat\": {", "f_open.write(b'[{\"test\": 88888}, {\"include\": \"simple.json\", \"type\": \"json\"}]') @pytest.fixture def s3_config_nested_3(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_3.json\"", "def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\":", "{ \"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\", \"title\": \"Single Band Test Style\",", "\"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\":", "}, }, \"scale_range\": (50, 3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True,", "\"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\",", "def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({", "s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\":", "or not the file format supports multiple time slices. 
\"multi-time\": False }, \"netCDF\":", "dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords(", "} }) }) return output @pytest.fixture def dummy_raw_ls_data(): output = xr.Dataset({ \"red\": dummy_da(5,", "[True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0,", "\"invert\": True, }, { # Mask out pixels with low_solar_angle, high_slope # or", "{\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0, \"color\":", "by the decision tree\", \"bits\": 7, \"values\": { '0': False, '1': True },", "{\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\":", "}, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"#96966e\", }, ]", "\"band\": \"water\", \"enum\": 1, } ] }, { \"components\": { \"red\": {\"bs\": 1.0},", "\"color\": \"Aqua\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\",", "\"left\": -0.1, \"right\": 0.1, }, } } @pytest.fixture def minimal_global_raw_cfg(): return { \"global\":", "\"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\", \"title\": \"Single", "match if all of the conditions hold. 
\"flags\": {\"and\": {\"cloud_shadow\": True, \"high_slope\": True}},", "Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import time from unittest.mock import MagicMock import", "import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5 while timeout", "\"scale_range\": (1500, 3700), }, \"green\": { \"nir\": 1.0, \"scale_range\": (1600, 3200), }, \"blue\":", "= product_by_name return dc @pytest.fixture def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"}", "\"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\",", "0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\",", "\"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\",", "(data[\"nir\"] + data[\"red\"]) return [ { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\":", "'1': True }, }, \"low_solar_angle\": { \"description\": \"Low solar incidence angle\", \"bits\": 2,", "S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client(): from botocore.session", "\"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\": {", "# Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\",", "timeout = 5 while timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok:", "100, 1000, 600, 200, 1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 200, 200,", "{ \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50,", "S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, 
client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri", "\"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 22564,", "\"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri,", "= xr.Dataset({ \"ir\": dummy_da(3, \"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\",", "\"scale_range\": (50, 3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\":", "\"Won't happen. Can't happen. Might happen.\", }, } }) }) return output @pytest.fixture", "{}} if 'nonativecrs' in s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\"", "\"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [ {", "{\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, \"alpha\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "mprod.definition = {\"storage\": {}} if 'nonativecrs' in s: pass elif 'badnativecrs' in s:", "to come before the Cloud # and High Slopes rules. { \"title\": \"Cloudy", "file format supports multiple time slices. 
\"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\",", "}, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0,", "coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords),", "config_uri = \"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562,", "\"red\": {\"red\": 1.0}, \"green\": { \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\":", "MagicMock() lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb, \"foo_nonativecrs\":", "while timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass", "\"TEST:CRS\": { \"geographic\": False, \"horizontal_coord\": \"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\":", "timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout", "{\"test_py\": {\"include\": \"tests.cfg.simple.simple\", \"type\": \"python\"}, \\ \"test_json\": {\"include\": \"tests/cfg/simple.json\", \"type\": \"json\"}}}' ) @pytest.fixture", "\"Single Band Test Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0}, \"green\": {\"band1\":", "MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3): config_uri = \"s3://testbucket/simple.json\" with s3.open(config_uri, \"wb\")", "splodgy looking\" }, \"splodgy\": { \"bits\": 2, \"values\": { '0': \"Splodgeless\", '1': \"Splodgy\",", "return scaled from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): # Calculate NDVI (-1.0", "[ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\":", "\"simple.json\", 
\"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with", "300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200, 500, 1000, 600, 100, 700], dim_coords), \"uv\":", "(0, 255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"green\", \"band2\":", "\"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\": True, \"cloud\": True", "{ \"name\": \"band1\", \"title\": \"Single Band Test Style\", \"abstract\": \"\", \"components\": { \"red\":", "\"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3577\", \"alias_of\": None, }, \"TEST:CRS\": { \"geographic\":", "endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def s3_base(): # writable local S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py", "\"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\":", "{\"value\": 0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ] }, {", "cloud AND no water observed \"band\": \"water\", \"flags\": { \"cloud\": True, \"water_observed\": False,", "@pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return {", "1, 2)] return { \"lat\": { \"min\": -0.1, \"max\": 0.1, }, \"lon\": {", "\"All splodgy looking\" }, \"ugly\": { \"bits\": 1, \"values\": { '0': False, '1':", "ndvi(data): # Calculate NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"] - data[\"red\"]) /", "@pytest.fixture def minimal_dc(): dc = MagicMock() nb = MagicMock() nb.index = ['band1', 'band2',", "] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, },", "\"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Cloud 
Shadow\", \"abstract\": \"\",", "False, \"water_observed\": False, } } ] }, { \"components\": { \"red\": {\"bs\": 1.0},", "\"red\": {\"band1\": 1.0}, \"green\": {\"band1\": 1.0}, \"blue\": {\"band1\": 1.0}, }, \"scale_range\": [0, 1024]", "client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture def s3_config_simple(s3):", "2574, \"thing\": {\"include\": \"nested_3.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_mixed_nested(s3, s3_config_simple): config_uri =", "return { \"title\": \"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"multi_product\": True, \"product_names\":", "\"alias_of\": None, }, } global_cfg.folder_index = { \"folder.existing_folder\": MagicMock(), } return global_cfg @pytest.fixture", "{\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 8000), }, { \"components\": {", "\"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\": True,", "\"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": { # WGS-84", "\"values\": { '0': False, '1': True }, \"description\": \"Real, real ugly\", }, \"impossible\":", "wcs_global_cfg(): return { \"formats\": { # Key is the format name, as used", "} ] }, { \"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\":", "}, \"description\": \"All splodgy looking\" }, \"splodgy\": { \"bits\": 2, \"values\": { '0':", "\"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\":", "False, } }, { # Mask out pixels with cloud AND no water", "}) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return 
xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture", "scaled = ((unscaled + 1.0) * 255 / 2).clip(0, 255) return scaled from", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0), \"scale_to\":", "= None global_cfg.contact_position = None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities = { \"auth0\":", "parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return parent @pytest.fixture def minimal_layer_cfg():", "{\"band1\": 1.0}, }, \"scale_range\": [0, 1024] } ] } } @pytest.fixture def mock_range():", "\"cloud\": { \"description\": \"Cloudy\", \"bits\": 6, \"values\": { '0': False, '1': True },", "None, }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\":", "= MagicMock() lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb, \"foo_nativecrs\": nb,", "return { \"lat\": { \"min\": -0.1, \"max\": 0.1, }, \"lon\": { \"min\": -0.1,", "{ \"red\": {\"red\": 1.0}, \"green\": {}, \"blue\": {}, }, \"scale_range\": (50, 3000), },", "[{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\", "[dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0 to", "] } }, { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\":", "\"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\", }, { \"title\":", "\"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)] return { \"lat\": { \"min\":", "break 
except: pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture() def s3(s3_base,", "\"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [ { \"name\": \"band1\", \"title\":", "{\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), }, { \"components\": { \"red\": { \"swir1\":", "{\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\": -0.1, \"color\": \"#505050\",", "True } }, \"cloud_shadow\": { \"description\": \"Cloud shadow\", \"bits\": 5, \"values\": { '0':", "}, }, \"color\": \"#2f2922\", }, { \"title\": \"Steep Terrain\", \"abstract\": \"\", \"flags\": {\"high_slope\":", "\"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud_shadow\": True } }, \"color\": \"#335277\",", "1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800), }, {", "dtype=\"int16\"), \"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"), \"nir\": dummy_da(2000,", "dtype=np.int16), \"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return", "3000), }, ] @pytest.fixture def configs_for_wofs(): return [ { \"name\": \"observations\", \"title\": \"Observations\",", "{ \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ] } xyt_coords", "\"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, } ] }, { \"components\": {", "}, \"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\":", "high-slopes. 
{ \"title\": \"High Slopes\", \"abstract\": \"\", \"flags\": {\"high_slope\": True}, \"color\": \"Brown\", },", "0.5, \"color\": \"#707030\", }, {\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [ {", "return output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords, dtype=np.int16),", "1.0, \"color\": \"#FF9090\", }, ] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\":", "\"invert\": True, }, ] } ] @pytest.fixture def multi_date_cfg(): return { \"index_function\": {", "\"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16),", "}, {\"value\": -0.1, \"color\": \"#505050\", }, {\"value\": -0.01, \"color\": \"#303030\", }, {\"value\": 0.0,", "High Slope\", \"abstract\": \"\", # Flag rules can contain an \"and\" - they", "\"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\": \"Cloud", "} }, \"color\": \"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": {", "1, 22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata(): return xarray.Dataset({ \"red\":", "\"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\": -1.0, \"color\": \"#000000\",", "}, { \"title\": \"Terrain Shadow or Low Sun Angle\", \"abstract\": \"\", \"flags\": {", "True, \"vertical_coord_first\": True }, }, }, \"layers\": [] } @pytest.fixture def wcs_global_cfg(): return", "dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1,", "\"EPSG:3577\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1,", "s3_base(): # writable local S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc", 
"xr.Dataset({ \"pq\": dim1_da(\"pq\", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": {", "dim1_null_mask(coords): return dim1_da(\"mask\", [True] * len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0,", "1, \"values\": { '0': False, '1': True }, \"description\": \"Real, real ugly\", },", "}, { \"value\": 0.3, \"color\": \"#703070\", }, { \"value\": 0.6, \"color\": \"#e0e070\", },", "\"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", \"vertical_coord_first\": False, \"gml_name\": \"http://www.opengis.net/def/crs/EPSG/0/3857\", \"alias_of\": None, }, \"EPSG:4326\": {", "\"band1\", \"title\": \"Single Band Test Style\", \"abstract\": \"\", \"components\": { \"red\": {\"band1\": 1.0},", "\"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\": False, \"horizontal_coord\": \"hortizonal_cults\",", "0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0}, \"blue\": { \"swir1\": 0.5, \"swir2\": 0.5,", "\"green\": dummy_da(700, \"green\", xyt_coords, dtype=\"int16\"), \"blue\": dummy_da(1500, \"blue\", xyt_coords, dtype=\"int16\"), \"nir\": dummy_da(2000, \"nir\",", "{ \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME type of the image, as used in", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\": [ { \"value\":", "dc = MagicMock() nb = MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value", "output = xr.Dataset({ \"ir\": dim1_da(\"ir\", [800, 100, 1000, 600, 200, 1000], dim_coords), \"red\":", "False, \"high_slope\": False, \"cloud_shadow\": False, } }, { # Mask out pixels with", "float(\"nan\"), \"band4\": \"nan\", } lmo = MagicMock() lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\":", "mprod.definition[\"storage\"][\"crs\"] = \"EPSG:9999\" elif 'nativecrs' in s: 
mprod.definition[\"storage\"][\"crs\"] = \"EPSG:4326\" else: pass if", "coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data],", "{ \"water_observed\": True, }, \"color\": \"#4f81bd\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\":", "True }, }, \"noncontiguous\": { \"description\": \"At least one EO band is missing", "water by the decision tree\", \"bits\": 7, \"values\": { '0': False, '1': True", "= { \"band4\": { \"flags_definition\": flag_def } } mprod.definition = {\"storage\": {}} if", "\"All splodgy looking\" }, \"flavour\": { \"bits\": 3, \"values\": { '0': \"Bland\", '1':", "\"#90FF90\", } ] }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0},", "{ \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\", }, { \"title\":", "{ \"title\": \"Cloud Shadow and High Slope\", \"abstract\": \"\", # Flag rules can", "import xarray as xr from s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI,", "coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords, dtype=np.int16), \"swir2\":", "None global_cfg.abstract = \"Global Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", }", "set(times), \"bboxes\": { \"EPSG:4326\": {\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, },", "\"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\":", "}, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {},", "}, { \"title\": \"Cloud Shadow and High Slope\", \"abstract\": \"\", # Flag rules", "writable local S3 system # adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = 
subprocess.Popen([\"moto_server\",", "\"water_observed\": True, \"cloud_shadow\": True } }, \"color\": \"#335277\", }, { \"title\": \"Cloud\", \"abstract\":", "* len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0,", "} }, { # Mask out pixels with cloud AND no water observed", "\"name\": \"a_layer\", \"multi_product\": True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\":", "dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\", coords,", "\"scale_range\": (50, 3000), }, { \"components\": { \"red\": { \"red\": 0.333, \"green\": 0.333,", "def minimal_global_cfg(): global_cfg = MagicMock() global_cfg.keywords = {\"global\"} global_cfg.attribution.title = \"Global Attribution\" global_cfg.contact_org", "{\"cloud\": True}, \"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\":", "}, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\": { \"nodata\": False,", "\"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs = { \"EPSG:3857\": { # Web Mercator \"geographic\":", "True }, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3, \"values\": { '0':", "\"pv\": dummy_da(723, \"pv\", coords, dtype=np.int16), \"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output", "\"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\": \"Beige\",", "2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import time from unittest.mock import", "255) } }, }, \"scale_range\": (50, 3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "\"title\": \"Water\", \"abstract\": \"\", \"flags\": 
{\"water_observed\": True}, \"color\": \"Aqua\", }, { \"title\": \"Dry\",", "{ \"noncontiguous\": True, \"nodata\": True, }, }, \"alpha\": 0.0, \"color\": \"#ffffff\", }, {", "dc.list_measurements.return_value = lmo def product_by_name(s): if 'lookupfail' in s: return None mprod =", "{ \"red\": { \"swir1\": 1.0, \"scale_range\": (1500, 3700), }, \"green\": { \"nir\": 1.0,", "coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture", "}, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": ndvi, \"blue\": {\"blue\": 1.0}, },", "xarray.combine_by_coords( [dummy_raw_fc_data, dummy_raw_wo_data], join=\"exact\") @pytest.fixture def configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0", "Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\": \"y\", }, \"EPSG:4326\": { # WGS-84", "as water by the decision tree\", \"bits\": 7, \"values\": { '0': False, '1':", "{ \"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\": 1.0},", "], \"published_CRSs\": { \"EPSG:3857\": { # Web Mercator \"geographic\": False, \"horizontal_coord\": \"x\", \"vertical_coord\":", "pass if 'nonativeres' in s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = {", "}, \"low_solar_angle\": { \"description\": \"Low solar incidence angle\", \"bits\": 2, \"values\": { '0':", "1.0}, }, \"scale_range\": (10, 800), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "} }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": {", "\"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True, \"horizontal_coord\": \"longitude\", \"vertical_coord\": \"latitude\", \"gml_name\":", "\"abstract\": \"\", \"flags\": { \"and\": { \"high_slope\": True, \"cloud\": True } }, \"color\":", "coords), \"uv\": dummy_da(-1, \"uv\", coords), }) return output @pytest.fixture def null_mask(): return dummy_da(True,", 
"}, \"scale_range\": (50, 3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\":", "> 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if r.ok: break except: pass timeout -=", "s: pass elif 'nativeres' in s: mprod.definition[\"storage\"][\"resolution\"] = { \"latitude\": 0.001, \"longitude\": 0.001,", "1.0, \"color\": \"#90FF90\", } ] }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\":", "proc.wait() @pytest.fixture() def s3(s3_base, monkeypatch): monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"foo\") monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"bar\") client = get_boto3_client() client.create_bucket(Bucket=\"testbucket\")", "True}, \"color\": \"Aqua\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True}, \"color\":", "(50, 3000), }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"mapped_bands\": True, \"kwargs\": {\"band1\": \"nir\",", "\"blue\": dummy_da(2, \"blue\", coords, dtype=np.int16), \"nir\": dummy_da(101, \"nir\", coords, dtype=np.int16), \"swir1\": dummy_da(1051, \"swir1\",", "Slope\", \"abstract\": \"\", # Flag rules can contain an \"and\" - they match", "{\"value\": 1.0, \"color\": \"#FF9090\", }, ], \"multi_date\": [ { \"allowed_count_range\": [2, 2], \"preserve_user_date_order\":", "f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563, \"thing\": {\"include\": \"simple.json\",", "@pytest.fixture def flask_client(monkeypatch): monkeypatch.setenv(\"DEFER_CFG_PARSE\", \"yes\") from datacube_ows.ogc import app with app.test_client() as client:", "}, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", }, {\"value\":", "[ datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture def xyt_dummydata():", "}, ] @pytest.fixture def configs_for_wofs(): return [ { \"name\": \"observations\", \"title\": 
\"Observations\", \"abstract\":", "3000), }, { \"components\": { \"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333,", "dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0] output = xr.Dataset({ \"ir\": dim1_da(\"ir\",", "pixels with low_solar_angle, high_slope # or cloud shadow. \"band\": \"water\", \"flags\": { \"low_solar_angle\":", "scaled from datacube_ows.styles.api import scalable @scalable def scaled_ndvi(data): # Calculate NDVI (-1.0 to", "MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value = { \"band1\": -999, \"band2\":", "1.0}, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": { \"red\": 0.333,", "\"band\": \"water\", \"enum\": 1, \"invert\": True, } ] }, { \"components\": { \"red\":", "happen. Can't happen. Might happen.\", }, } }) }) return output def dim1_null_mask(coords):", "'0': False, '1': True }, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\", \"bits\": 3,", "{ \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0,", "rule needs to come before the Cloud # and High Slopes rules. 
{", "{\"band1\": \"nir\", \"band2\": \"red\"}, }, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\":", "xr from s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da)", "{ \"joviality\": { \"bits\": 3, \"values\": { '0': \"Melancholic\", '1': \"Joyous\", }, \"description\":", "[0, 1024] } ] } } @pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1,", "\"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", }, ] } }, { \"name\": \"observations\",", "config_uri = \"s3://testbucket/mixed_nested.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 9364, \\ \"subtest\": {\"test_py\":", "\"aggregator_function\": { \"function\": \"datacube_ows.band_utils.multi_date_delta\" }, \"mpl_ramp\": \"RdYlBu\", \"range\": [-1.0, 1.0], } ] }", "(\"time\", [ datetime.datetime(2021, 1, 1, 22, 44, 5), datetime.datetime.now() ]) ] @pytest.fixture def", "\"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0}}, \"scale_range\": [0.0,", "from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py import subprocess proc = subprocess.Popen([\"moto_server\", \"s3\", \"-p\", MOTO_PORT]) timeout = 5", "}, \"scale_range\": (1000, 8000), }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\":", "\"s3://testbucket/nested_3.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null},", "\"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": {", "\"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\": [ {", "\"and\": { \"water_observed\": True, \"cloud_shadow\": True } }, \"color\": \"#335277\", }, { \"title\":", "extension to 
add to the filename. \"extension\": \"tif\", # Whether or not the", "0.0, \"color\": \"black\", }, {\"value\": 0.01, \"color\": \"#303000\", }, {\"value\": 0.5, \"color\": \"#707030\",", "return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"}, }, \"color_ramp\":", "rules can contain an \"or\" - they match if either of the conditions", "True, \"high_slope\": True}}, \"color\": \"SlateGray\", }, { \"title\": \"Cloud Shadow and High Slope\",", "] }, { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\":", "\"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\": [ { \"title\": \"Water\", \"abstract\":", "if r.ok: break except: pass timeout -= 0.1 time.sleep(0.1) yield proc.terminate() proc.wait() @pytest.fixture()", "MagicMock(), } return global_cfg @pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract = \"Parent", "}) return output @pytest.fixture def dummy_raw_fc_data(): output = xr.Dataset({ \"bs\": dummy_da(546, \"bs\", coords,", ") @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as", "{\"top\": 0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3577\": {\"top\": 0.1, \"bottom\":", "} } @pytest.fixture def minimal_multiprod_cfg(): return { \"title\": \"The Title\", \"abstract\": \"The Abstract\",", "\"scale_range\": [0, 1024] } ] } } @pytest.fixture def mock_range(): times = [datetime.datetime(2010,", "# Copyright (c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import time", "\"red\": {\"swir1\": 1.0}, \"green\": {\"nir\": 1.0}, \"blue\": {\"green\": 1.0}, }, \"scale_range\": (50, 3000),", "'0': \"Splodgeless\", '1': \"Splodgy\", }, \"description\": \"All splodgy looking\" }, \"ugly\": { \"bits\":", "return 
output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8,", "get_boto3_client(): from botocore.session import Session session = Session() return session.create_client(\"s3\", endpoint_url=MOTO_S3_ENDPOINT_URI) @pytest.fixture def", "\"pq_masks\": [ { # Mask out nodata pixels. \"band\": \"water\", \"enum\": 1, \"invert\":", "3, \"values\": { '0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy looking\" },", "s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888}, {\"include\":", "\"components\": { \"red\": { \"red\": 0.333, \"green\": 0.333, \"blue\": 0.333, }, \"green\": {\"nir\":", "\"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ],", "configs_for_wofs(): return [ { \"name\": \"observations\", \"title\": \"Observations\", \"abstract\": \"Observations\", \"value_map\": { \"water\":", "\"ir\", coords), \"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\",", "\"#bad4f2\", }, { \"title\": \"Shaded Water\", \"abstract\": \"\", \"flags\": { \"and\": { \"water_observed\":", "\"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, # Only matches", "\"\", \"flags\": { \"and\": { \"water_observed\": True, \"cloud\": True } }, \"color\": \"#bad4f2\",", "s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da, dummy_da) def get_boto3_client():", "'0': False, '1': True }, }, \"noncontiguous\": { \"description\": \"At least one EO", "# and High Slopes rules. 
{ \"title\": \"Cloudy Slopes\", \"abstract\": \"\", \"flags\": {\"and\":", "{ \"bits\": 0, \"description\": \"No data\", \"values\": { '0': False, '1': True },", "dim_coords), \"green\": dim1_da(\"green\", [100, 500, 0, 400, 300, 200], dim_coords), \"blue\": dim1_da(\"blue\", [200,", "image, as used in the Http Response. \"mime\": \"image/geotiff\", # The file extension", "= {\"storage\": {}} if 'nonativecrs' in s: pass elif 'badnativecrs' in s: mprod.definition[\"storage\"][\"crs\"]", "\"DarkKhaki\", }, { \"title\": \"Dry\", \"abstract\": \"\", \"flags\": {\"water_observed\": False}, \"color\": \"Brown\", },", "name, as used in DescribeCoverage XML \"GeoTIFF\": { \"renderer\": \"datacube_ows.wcs_utils.get_tiff\", # The MIME", "{\"include\": \"simple.json\", \"type\": \"json\"}}]}' ) @pytest.fixture def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3): config_uri = \"s3://testbucket/nested_4.json\"", "{\"high_slope\": True}, \"color\": \"Brown\", }, { \"title\": \"Cloud\", \"abstract\": \"\", \"flags\": {\"cloud\": True},", "out pixels with cloud AND no water observed \"band\": \"water\", \"flags\": { \"cloud\":", "(0.0, 1.0), \"scale_to\": (0, 255) } }, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (50,", "0.5, }, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\": {\"red\": 1.0},", "to [-1.0 - 1.0] to [0 - 255] scaled = ((unscaled + 1.0)", "}, ] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\": \"red\"},", "\"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\": \"application/x-netcdf\", \"extension\": \"nc\", \"multi-time\": True, } }, \"native_format\": \"GeoTIFF\", }", "{ \"value\": 0.1, \"color\": \"#000030\", \"alpha\": 1.0, }, { \"value\": 0.3, \"color\": \"#703070\",", "}, }, \"high_slope\": { \"description\": \"High slope\", \"bits\": 4, \"values\": { '0': False,", "global_cfg.contact_org = None global_cfg.contact_position = None global_cfg.abstract = 
\"Global Abstract\" global_cfg.authorities = {", "'1': \"Woah!\" }, \"description\": \"Won't happen. Can't happen. Might happen.\", }, } })", "{\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\":", "100.0], \"pq_masks\": [ { \"band\": \"water\", \"enum\": 1, \"invert\": True, } ] },", "\"flags_definition\": flag_def } } mprod.definition = {\"storage\": {}} if 'nonativecrs' in s: pass", "\"values\": { '0': False, '1': True }, }, \"terrain_shadow\": { \"description\": \"Terrain shadow\",", "\"red\"}, }, \"color_ramp\": [ {\"value\": -1.0, \"color\": \"#0000FF\"}, {\"value\": -0.2, \"color\": \"#005050\", },", "1.0} }, \"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\": { \"nodata\":", "minimal_global_raw_cfg(): return { \"global\": { \"title\": \"Test Title\", \"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\",", "return dummy_da(True, \"mask\", coords, dtype=np.bool) @pytest.fixture def dummy_raw_calc_data(): dim_coords = [-2.0, -1.0, 0.0,", "1.0) * 255 / 2).clip(0, 255) return scaled from datacube_ows.styles.api import scalable @scalable", "\"Beige\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\",", "5, \"values\": { '0': False, '1': True }, }, \"cloud\": { \"description\": \"Cloudy\",", "\"y\", }, \"EPSG:4326\": { # WGS-84 \"geographic\": True, \"vertical_coord_first\": True }, }, },", "} mprod.lookup_measurements.return_value = { \"band4\": { \"flags_definition\": flag_def } } mprod.definition = {\"storage\":", "= get_boto3_client() client.create_bucket(Bucket=\"testbucket\") S3FileSystem.clear_instance_cache() s3 = S3FileSystem(anon=False, client_kwargs={\"endpoint_url\": MOTO_S3_ENDPOINT_URI}) s3.invalidate_cache() yield s3 @pytest.fixture", "\"red\": dummy_da(5, \"red\", coords), \"green\": dummy_da(7, \"green\", coords), \"blue\": dummy_da(2, \"blue\", coords), \"uv\":", "} 
} @pytest.fixture def mock_range(): times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)]", "1000], dim_coords), \"red\": dim1_da(\"red\", [200, 500, 0, 200, 200, 700], dim_coords), \"green\": dim1_da(\"green\",", "to [0 - 255] scaled = ((unscaled + 1.0) * 255 / 2).clip(0,", "\"thing\": null}, \\ {\"test\": 2573, \"thing\": {\"include\": \"simple.json\", \"type\": \"json\"}}, \\ {\"test\": 2574,", "\"components\": { \"red\": {\"bs\": 1.0}, \"green\": {\"pv\": 1.0}, \"blue\": {\"npv\": 1.0} }, \"scale_range\":", "global_cfg.abstract = \"Global Abstract\" global_cfg.authorities = { \"auth0\": \"http://test.url/auth0\", \"auth1\": \"http://test.url/auth1\", } global_cfg.published_CRSs", "{ \"function\": scaled_ndvi, \"kwargs\": { \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } },", "\"flags\": {\"water_observed\": False}, \"color\": \"SaddleBrown\", }, ] } }, { \"value_map\": { \"water\":", "+ data[\"red\"]) return [ { \"components\": { \"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0},", "xarray import xarray as xr from s3fs.core import S3FileSystem from tests.utils import (MOTO_PORT,", "\"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": { \"band1\": \"nir\", \"band2\": \"red\", \"scale_from\": (0.0, 1.0),", "@pytest.fixture def multi_date_cfg(): return { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {\"band1\": \"nir\", \"band2\":", "}, \"mpl_ramp\": \"RdYlGn\", \"range\": [-1.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\":", "(c) 2017-2021 OWS Contributors # SPDX-License-Identifier: Apache-2.0 import datetime import time from unittest.mock", "def s3_config_nested_2(s3, s3_config_simple): config_uri = \"s3://testbucket/nested_2.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'[{\"test\": 88888},", "\"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", 
\"kwargs\": { \"band1\": \"nir\",", "100], dim_coords), \"pq\": dim1_da(\"pq\", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords, attrs={ \"flags_definition\":", "unittest.mock import MagicMock import numpy as np import pytest import requests import xarray", "'1': True }, }, \"high_slope\": { \"description\": \"High slope\", \"bits\": 4, \"values\": {", "\"abstract\": \"\", # Flag rules can contain an \"or\" - they match if", "\"blue\": {\"blue\": 1.0}, }, \"scale_range\": (1000, 3000), }, { \"components\": { \"red\": {", "- they match if either of the conditions hold. \"flags\": {\"or\": {\"terrain_shadow\": True,", "\"red\": {\"red\": 1.0}, \"green\": {\"green\": 1.0}, \"blue\": {\"blue\": 1.0}, }, \"scale_range\": (10, 800),", "False, \"horizontal_coord\": \"hortizonal_cults\", \"vertical_coord\": \"verbal_tics\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/NATIVE_CRS\", \"alias_of\": None, }, }", "s3_config_simple): config_uri = \"s3://testbucket/nested_1.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"include\": \"simple.json\", \"type\": \"json\"}')", "\"The Title\", \"abstract\": \"The Abstract\", \"name\": \"a_layer\", \"product_name\": \"foo\", \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\",", "# Make noncontiguous data transparent \"title\": \"\", \"abstract\": \"\", \"flags\": { \"or\": {", "\"band1\": \"green\", \"band2\": \"nir\", \"scale_from\": (0.0, 1.0), \"scale_to\": (0, 255) } }, },", "\"flags\": {\"cloud\": True}, \"color\": \"#c2c1c0\", }, { \"title\": \"Cloud Shadow\", \"abstract\": \"\", \"flags\":", "\"npv\": dummy_da(209, \"npv\", coords, dtype=np.int16), }) return output @pytest.fixture def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data): return", "@pytest.fixture def minimal_parent(): parent = MagicMock() parent.abstract = \"Parent Abstract\" parent.keywords = {\"global\",", "\"cloud\": True } }, \"color\": \"#f2dcb4\", }, { \"title\": \"Cloudy Water\", 
\"abstract\": \"\",", "\"flags\": {\"cloud_shadow\": True}, \"color\": \"SlateGray\", }, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\":", "0.5, \"swir2\": 0.5, }, }, \"scale_range\": (50, 3000), }, { \"components\": { \"red\":", "}, { \"title\": \"Water\", \"abstract\": \"\", \"flags\": {\"water_observed\": True}, \"color\": \"Aqua\", }, {", "\"horrible_zonts\", \"vertical_coord\": \"vertex_calories\", \"vertical_coord_first\": False, \"gml_name\": \"TEST/CRS\", \"alias_of\": None, }, \"TEST:NATIVE_CRS\": { \"geographic\":", "def configs_for_landsat(): def ndvi(data): # Calculate NDVI (-1.0 to 1.0) unscaled = (data[\"nir\"]", "format supports multiple time slices. \"multi-time\": False }, \"netCDF\": { \"renderer\": \"datacube_ows.wcs_utils.get_netcdf\", \"mime\":", "config_uri = \"s3://testbucket/nested_4.json\" with s3.open(config_uri, \"wb\") as f_open: f_open.write(b'{\"test\": 3222, \"things\": [{\"test\": 2572,", "output @pytest.fixture def dummy_raw_wo_data(): output = xr.Dataset({ \"water\": dummy_da(0b101, \"red\", coords, dtype=np.uint8, attrs", "0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, \"EPSG:3857\": {\"top\": 0.1, \"bottom\": -0.1,", "\"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\", \"styles\":", "'1': True }, }, } }) }) return output @pytest.fixture def dummy_raw_fc_data(): output", "either of the conditions hold. 
\"flags\": {\"or\": {\"terrain_shadow\": True, \"high_slope\": True}}, \"color\": \"SlateGray\",", "} lmo = MagicMock() lmo.loc = { \"foo_nativeres\": nb, \"foo_nonativeres\": nb, \"foo_badnativecrs\": nb,", "len(coords), coords) @pytest.fixture def raw_calc_null_mask(): dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0]", "= MagicMock() nb = MagicMock() nb.index = ['band1', 'band2', 'band3', 'band4'] nb.__getitem__.return_value =", "\"#ffffff\", }, { \"title\": \"Cloudy Steep Terrain\", \"abstract\": \"\", \"flags\": { \"and\": {", "import time from unittest.mock import MagicMock import numpy as np import pytest import", "0.1, \"bottom\": -0.1, \"left\": -0.1, \"right\": 0.1, }, } } @pytest.fixture def minimal_global_raw_cfg():", "'0': \"Bland\", '1': \"Tasty\", }, \"description\": \"All splodgy looking\" }, \"splodgy\": { \"bits\":", "True, \"product_names\": [\"foo\", \"bar\"], \"image_processing\": { \"extent_mask_func\": \"datacube_ows.ogc_utils.mask_by_val\", }, \"styling\": { \"default_style\": \"band1\",", "and High Slope\", \"abstract\": \"\", # Flag rules can contain an \"and\" -", "-0.1, \"max\": 0.1, }, \"lon\": { \"min\": -0.1, \"max\": 0.1, }, \"times\": times,", "MOTO_PORT]) timeout = 5 while timeout > 0: try: r = requests.get(MOTO_S3_ENDPOINT_URI) if", "0b01010, 0b10011, 0b00100, 0b10111], dim_coords, attrs={ \"flags_definition\": { \"joviality\": { \"bits\": 3, \"values\":", "\"wb\") as f_open: f_open.write(b'{\"test\": 2222, \"things\": [{\"test\": 22562, \"thing\": null}, \\ {\"test\": 22563,", "\"info_url\": \"https://my.domain.com/about_us\", \"allowed_urls\": [ \"http://localhost\", \"http://unsecure.domain.com/odc\", \"https://secure.domain.com/ows\", ], \"published_CRSs\": { \"EPSG:3857\": { #", "requests import xarray import xarray as xr from s3fs.core import S3FileSystem from tests.utils", "Abstract\" parent.keywords = {\"global\", \"parent\"} parent.attribution.title = \"Parent Attribution\" return parent @pytest.fixture def", "}, { 
\"components\": { \"red\": {\"red\": 1.0}, \"green\": { \"function\": \"datacube_ows.band_utils.norm_diff\", \"kwargs\": {", "\"red\"}, }, \"mpl_ramp\": \"ocean_r\", \"range\": [0.0, 1.0] }, { \"index_function\": { \"function\": \"datacube_ows.band_utils.norm_diff\",", "Slopes\", \"abstract\": \"\", \"flags\": {\"and\": {\"cloud\": True, \"high_slope\": True}}, \"color\": \"BurlyWood\", }, #", "\"\", \"flags\": {\"cloud_shadow\": True}, \"color\": \"#4b4b37\", }, { \"title\": \"Terrain Shadow or Low", "\"scale_range\": [0.0, 100.0], \"pq_masks\": [ { \"band\": \"water\", \"flags\": { \"nodata\": False, \"noncontiguous\":", "return dim1_da(\"mask\", [True] * len(dim_coords), dim_coords) @pytest.fixture def dummy_col_map_data(): dim_coords = [-2.0, -1.0," ]
[ "if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d =", "points with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 **", "corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - num_corner_points), x_history=corner_points)", "n - n_tilde if self.n_evals - n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals", "tuple in the list defines the bounds for the corresponding variable Example: [(1,", "number of initial points sampled by method Returns ------- (self.n_evals, len(self.bounds)) numpy array", "sample(self, bounds, n_evals): \"\"\"Returns a numpy array of sampled points. Does not include", "Object Sample low discrepancy sequences when initial point method is not feasible \"\"\"", "self.n_evals - n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return", "points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for", "variable Example: [(1, 2), (2, 3), (-1, 4)...] 
n_evals : int number of", "fix_corners = np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i]", "first which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2", "else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) #", "> 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else:", "\"\"\"Returns a numpy array of sampled points. Does not include corner points of", "<= n, sample n - n_tilde if self.n_evals - n_tilde > 0: random_points", "- n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed,", "sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - num_corner_points), x_history=corner_points) return np.vstack((corner_points,", "---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when initial point method", "space. 
Parameters ---------- bounds : list of tuples or list of grAdapt.space.datatype.base Each", "n_tilde = 2 ** d_tilde # sample random fixed corner points random_binary_array =", ".base import Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices", "bounds : list of tuples or list of grAdapt.space.datatype.base Each tuple in the", "2 ** d_tilde # sample random fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),))", "initial point method is not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns", "super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a numpy array of sampled points. Does", "of vertices is sampled. \"\"\" def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method :", "d_tilde, d_tilde) array n_tilde = 2 ** d_tilde # sample random fixed corner", "random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed", "n_evals : int number of initial points sampled by method Returns ------- (self.n_evals,", "sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array n_tilde = 2 ** d_tilde #", "= sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - num_corner_points), x_history=corner_points) return", "Samples all vertices if n_evals >= 2 ** len(bounds). Else, a subset of", "vertices if n_evals >= 2 ** len(bounds). Else, a subset of vertices is", "2 ** len(self.bounds) >= self.n_evals: # sample corner points first which fits in", "self.n_evals: # sample corner points first which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals)))", "a subset of vertices is sampled. 
\"\"\" def __init__(self, sampling_method): \"\"\" Parameters ----------", "random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners,", "n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points =", "import Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices if", "the list defines the bounds for the corresponding variable Example: [(1, 2), (2,", "tuples or list of grAdapt.space.datatype.base Each tuple in the list defines the bounds", "sampled. \"\"\" def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample", "__init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy sequences", "method is not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a numpy", "corner points first which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde])", "0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return", "def sample(self, bounds, n_evals): \"\"\"Returns a numpy array of sampled points. 
Does not", "n, sample n - n_tilde if self.n_evals - n_tilde > 0: random_points =", "sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when initial point method is", "def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy", "corner points with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2", "Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds)", "remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde)", "of the hypercube/search space. Parameters ---------- bounds : list of tuples or list", "remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i]", "self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i] == 0:", "fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde,", "n_evals): \"\"\"Returns a numpy array of sampled points. Does not include corner points", "discrepancy sequences when initial point method is not feasible \"\"\" super().__init__(sampling_method) def sample(self,", "the corresponding variable Example: [(1, 2), (2, 3), (-1, 4)...] 
n_evals : int", "points sampled by method Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals)", "# python import warnings # Third party imports import numpy as np #", "Third party imports import numpy as np # grAdapt from .base import Initial", "Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when initial point", ": list of tuples or list of grAdapt.space.datatype.base Each tuple in the list", "np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) # corner points with fixed rest dimensions", "of sampled points. Does not include corner points of the hypercube/search space. Parameters", "(2 ** d_tilde, d_tilde) array n_tilde = 2 ** d_tilde # sample random", "Example: [(1, 2), (2, 3), (-1, 4)...] n_evals : int number of initial", "super().sample(bounds, n_evals) if 2 ** len(self.bounds) >= self.n_evals: # sample corner points first", "2), (2, 3), (-1, 4)...] n_evals : int number of initial points sampled", "2 ** n_tilde <= n, sample n - n_tilde if self.n_evals - n_tilde", "vertices is sampled. \"\"\" def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed", "in the list defines the bounds for the corresponding variable Example: [(1, 2),", "all vertices if n_evals >= 2 ** len(bounds). 
Else, a subset of vertices", "= remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) # corner points", "len(self.bounds) >= self.n_evals: # sample corner points first which fits in n_evals d_tilde", "= self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i] ==", "point method is not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a", "1)) # (n, d-d_tilde) # corner points with fixed rest dimensions corner_points_fixed =", "Each tuple in the list defines the bounds for the corresponding variable Example:", "n_evals >= 2 ** len(bounds). Else, a subset of vertices is sampled. \"\"\"", "because 2 ** n_tilde <= n, sample n - n_tilde if self.n_evals -", "array of sampled points. Does not include corner points of the hypercube/search space.", "4)...] n_evals : int number of initial points sampled by method Returns -------", "Sample low discrepancy sequences when initial point method is not feasible \"\"\" super().__init__(sampling_method)", "------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds) >=", "# sample random fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:]", "np # grAdapt from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial):", "import warnings # Third party imports import numpy as np # grAdapt from", "(n_tilde, 1)) # (n, d-d_tilde) # corner points with fixed rest dimensions corner_points_fixed", "np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde <= n, sample n - n_tilde", "numpy array \"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds) >= self.n_evals: # sample", "(2, 3), (-1, 4)...] 
n_evals : int number of initial points sampled by", "fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde <=", "bounds, n_evals): \"\"\"Returns a numpy array of sampled points. Does not include corner", "points of the hypercube/search space. Parameters ---------- bounds : list of tuples or", "list of tuples or list of grAdapt.space.datatype.base Each tuple in the list defines", "grAdapt.space.datatype.base Each tuple in the list defines the bounds for the corresponding variable", "random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i", "numpy as np # grAdapt from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds", "which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 **", "len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else:", "# Third party imports import numpy as np # grAdapt from .base import", "\"\"\" Samples all vertices if n_evals >= 2 ** len(bounds). Else, a subset", "return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0]", ": int number of initial points sampled by method Returns ------- (self.n_evals, len(self.bounds))", "** d_tilde, d_tilde) array n_tilde = 2 ** d_tilde # sample random fixed", "grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals >= 2", "of grAdapt.space.datatype.base Each tuple in the list defines the bounds for the corresponding", "fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) # corner points with fixed", "if n_evals >= 2 ** len(bounds). 
Else, a subset of vertices is sampled.", "\"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when initial", "a numpy array of sampled points. Does not include corner points of the", "d_tilde) array n_tilde = 2 ** d_tilde # sample random fixed corner points", "corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array n_tilde = 2 **", "range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d", "not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a numpy array of", "sample n - n_tilde if self.n_evals - n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds,", "else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - num_corner_points),", "sampled points. Does not include corner points of the hypercube/search space. 
Parameters ----------", "Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals", "= np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde <= n, sample n -", "len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds) >= self.n_evals: #", "import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals >= 2 **", "sample random fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners", "or list of grAdapt.space.datatype.base Each tuple in the list defines the bounds for", "fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) # corner", ">= self.n_evals: # sample corner points first which fits in n_evals d_tilde =", "np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points", "with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde", "corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals -", "else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds,", "grAdapt from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples", "corresponding variable Example: [(1, 2), (2, 3), (-1, 4)...] 
n_evals : int number", "the bounds for the corresponding variable Example: [(1, 2), (2, 3), (-1, 4)...]", "random fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners =", "VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals >= 2 ** len(bounds). Else, a", "subset of vertices is sampled. \"\"\" def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method", "not include corner points of the hypercube/search space. Parameters ---------- bounds : list", "(self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds) >= self.n_evals:", "fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) #", "bounds for the corresponding variable Example: [(1, 2), (2, 3), (-1, 4)...] n_evals", "n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array", "# (2 ** d_tilde, d_tilde) array n_tilde = 2 ** d_tilde # sample", "dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde <= n, sample", "party imports import numpy as np # grAdapt from .base import Initial from", "import numpy as np # grAdapt from .base import Initial from grAdapt.utils.sampling import", "# grAdapt from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\"", "class VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals >= 2 ** len(bounds). 
Else,", "= 2 ** d_tilde # sample random fixed corner points random_binary_array = np.random.randint(2,", "warnings # Third party imports import numpy as np # grAdapt from .base", ": grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when initial point method is not", "sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals >= 2 ** len(bounds).", "\"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a numpy array of sampled points.", "int number of initial points sampled by method Returns ------- (self.n_evals, len(self.bounds)) numpy", "# (n, d-d_tilde) # corner points with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde,", "of initial points sampled by method Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\"", "fix_corners_2d)) # because 2 ** n_tilde <= n, sample n - n_tilde if", "size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if", "in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde)", "Parameters ---------- bounds : list of tuples or list of grAdapt.space.datatype.base Each tuple", "n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points))", "n_evals) if 2 ** len(self.bounds) >= self.n_evals: # sample corner points first which", "# corner points with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because", "0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1))", "imports import numpy as np # grAdapt from .base import Initial from grAdapt.utils.sampling", "by 
method Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if 2", "python import warnings # Third party imports import numpy as np # grAdapt", "= np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) # corner points with fixed rest", "= self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else:", "as np # grAdapt from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds class", "int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array n_tilde = 2", "2 ** len(bounds). Else, a subset of vertices is sampled. \"\"\" def __init__(self,", "if 2 ** len(self.bounds) >= self.n_evals: # sample corner points first which fits", "Does not include corner points of the hypercube/search space. Parameters ---------- bounds :", "of tuples or list of grAdapt.space.datatype.base Each tuple in the list defines the", "n_tilde <= n, sample n - n_tilde if self.n_evals - n_tilde > 0:", "points. Does not include corner points of the hypercube/search space. Parameters ---------- bounds", "# because 2 ** n_tilde <= n, sample n - n_tilde if self.n_evals", "sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when", "grAdapt.sampling.equidistributed Object Sample low discrepancy sequences when initial point method is not feasible", "for the corresponding variable Example: [(1, 2), (2, 3), (-1, 4)...] n_evals :", "** len(self.bounds) >= self.n_evals: # sample corner points first which fits in n_evals", "== 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde,", "is sampled. 
\"\"\" def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object", "corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde <= n, sample n", "from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all vertices if n_evals >=", "numpy array of sampled points. Does not include corner points of the hypercube/search", "return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals", "np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0]", "= remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n,", "sampled by method Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if", "---------- bounds : list of tuples or list of grAdapt.space.datatype.base Each tuple in", "** d_tilde # sample random fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds", "include corner points of the hypercube/search space. Parameters ---------- bounds : list of", "= sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array n_tilde = 2 ** d_tilde", "is not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a numpy array", "d_tilde # sample random fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds =", "x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points =", "(-1, 4)...] 
n_evals : int number of initial points sampled by method Returns", "if self.n_evals - n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed)", "n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points", "= np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] =", "remainder_bounds[i][1] fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde) # corner points with", "hypercube/search space. Parameters ---------- bounds : list of tuples or list of grAdapt.space.datatype.base", "len(bounds). Else, a subset of vertices is sampled. \"\"\" def __init__(self, sampling_method): \"\"\"", "sequences when initial point method is not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds,", "array n_tilde = 2 ** d_tilde # sample random fixed corner points random_binary_array", "fixed corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1,", "\"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds) >= self.n_evals: # sample corner points", "low discrepancy sequences when initial point method is not feasible \"\"\" super().__init__(sampling_method) def", "** len(bounds). Else, a subset of vertices is sampled. 
\"\"\" def __init__(self, sampling_method):", "i in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] =", "method Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds, n_evals) if 2 **", "(n, d-d_tilde) # corner points with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d))", "corner points random_binary_array = np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds)))", "d-d_tilde) # corner points with fixed rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) #", "sample corner points first which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde =", "- n_tilde if self.n_evals - n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals -", "list defines the bounds for the corresponding variable Example: [(1, 2), (2, 3),", "the hypercube/search space. Parameters ---------- bounds : list of tuples or list of", ">= 2 ** len(bounds). Else, a subset of vertices is sampled. 
\"\"\" def", "defines the bounds for the corresponding variable Example: [(1, 2), (2, 3), (-1,", "# sample corner points first which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde", "d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array n_tilde", "initial points sampled by method Returns ------- (self.n_evals, len(self.bounds)) numpy array \"\"\" super().sample(bounds,", "in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i] = remainder_bounds[i][1]", "** n_tilde <= n, sample n - n_tilde if self.n_evals - n_tilde >", "- n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds)", "num_corner_points = corner_points.shape[0] random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - num_corner_points), x_history=corner_points) return np.vstack((corner_points, random_points))", "3), (-1, 4)...] 
n_evals : int number of initial points sampled by method", "np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i in range(len(remainder_bounds)):", "for i in range(len(remainder_bounds)): if random_binary_array[i] == 0: fix_corners[0][i] = remainder_bounds[i][0] else: fix_corners[0][i]", "self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde), x_history=corner_points_fixed) return np.vstack((corner_points_fixed, random_points)) else: return corner_points_fixed else: corner_points", "array \"\"\" super().sample(bounds, n_evals) if 2 ** len(self.bounds) >= self.n_evals: # sample corner", "= np.random.randint(2, size=(len(self.bounds),)) remainder_bounds = self.bounds[d_tilde:] fix_corners = np.zeros((1, len(remainder_bounds))) for i in", "list of grAdapt.space.datatype.base Each tuple in the list defines the bounds for the", "= int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array n_tilde =", "from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds class VerticesForceRandom(Initial): \"\"\" Samples all", "feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals): \"\"\"Returns a numpy array of sampled", "when initial point method is not feasible \"\"\" super().__init__(sampling_method) def sample(self, bounds, n_evals):", "[(1, 2), (2, 3), (-1, 4)...] n_evals : int number of initial points", "corner points of the hypercube/search space. 
Parameters ---------- bounds : list of tuples", "points first which fits in n_evals d_tilde = int(np.floor(np.log2(self.n_evals))) corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) #", "n_tilde if self.n_evals - n_tilde > 0: random_points = self.sampling_method.sample(bounds=self.bounds, n=(self.n_evals - n_tilde),", "rest dimensions corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d)) # because 2 ** n_tilde <= n,", "random_points)) else: return corner_points_fixed else: corner_points = sample_corner_bounds(self.bounds) num_corner_points = corner_points.shape[0] random_points =", "Else, a subset of vertices is sampled. \"\"\" def __init__(self, sampling_method): \"\"\" Parameters", "\"\"\" def __init__(self, sampling_method): \"\"\" Parameters ---------- sampling_method : grAdapt.sampling.equidistributed Object Sample low" ]
[ "= 1 self.__data[word] = self.__word_num self.__word_num += 1 def __getitem__(self, word): if word", "+= 1 def __getitem__(self, word): if word not in self.__data: print('Error! The word", "- 1] = 1 return ret def get_voca_size(self): return self.__word_num - 1 def", "self.__build() def __build(self): self.__word_num = 1 for sentence in self.__sentences: for word in", "def get_word_frequency(self, word): if word not in self.__data: print('Error! The word not in", "class onehot: def __init__(self, sentences): self.__sentences = sentences self.__data = {} self.__count =", "onehot: def __init__(self, sentences): self.__sentences = sentences self.__data = {} self.__count = {}", "= sentences self.__data = {} self.__count = {} self.__build() def __build(self): self.__word_num =", "in self.__data: self.__count[word] += 1 else: self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num", "get_index_of_word(self, word): if word not in self.__data: print('Error! The word not in it\\'s", "else: self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num += 1 def __getitem__(self, word):", "in self.__data: print('Error! The word not in it\\'s map!') else: return self.__count[word] def", "not in self.__data: print('Error! The word not in it\\'s map!') else: ret =", "word): if word not in self.__data: print('Error! 
The word not in it\\'s map!')", "it\\'s map!') else: return self.__count[word] def get_index_of_word(self, word): if word not in self.__data:", "1 self.__data[word] = self.__word_num self.__word_num += 1 def __getitem__(self, word): if word not", "return self.__word_num - 1 def get_word_frequency(self, word): if word not in self.__data: print('Error!", "not in it\\'s map!') else: return self.__count[word] def get_index_of_word(self, word): if word not", "= {} self.__count = {} self.__build() def __build(self): self.__word_num = 1 for sentence", "{} self.__build() def __build(self): self.__word_num = 1 for sentence in self.__sentences: for word", "word in self.__data: self.__count[word] += 1 else: self.__count[word] = 1 self.__data[word] = self.__word_num", "1, 1)) ret[self.__data[word] - 1] = 1 return ret def get_voca_size(self): return self.__word_num", "def __getitem__(self, word): if word not in self.__data: print('Error! The word not in", "1] = 1 return ret def get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self,", "np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1] = 1 return ret def get_voca_size(self):", "- 1, 1)) ret[self.__data[word] - 1] = 1 return ret def get_voca_size(self): return", "import numpy as np import pickle class onehot: def __init__(self, sentences): self.__sentences =", "sentence: if word in self.__data: self.__count[word] += 1 else: self.__count[word] = 1 self.__data[word]", "1 return ret def get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self, word): if", "+= 1 else: self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num += 1 def", "if word in self.__data: self.__count[word] += 1 else: self.__count[word] = 1 self.__data[word] =", "self.__data: self.__count[word] += 1 else: self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num +=", "= 1 for sentence in self.__sentences: for word in sentence: if word in", "- 1 def get_word_frequency(self, 
word): if word not in self.__data: print('Error! The word", "The word not in it\\'s map!') else: ret = np.zeros((self.__word_num - 1, 1))", "= {} self.__build() def __build(self): self.__word_num = 1 for sentence in self.__sentences: for", "= self.__word_num self.__word_num += 1 def __getitem__(self, word): if word not in self.__data:", "word not in self.__data: print('Error! The word not in it\\'s map!') else: return", "in self.__data: print('Error! The word not in it\\'s map!') else: return self.__data[word] -", "sentence in self.__sentences: for word in sentence: if word in self.__data: self.__count[word] +=", "1 else: self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num += 1 def __getitem__(self,", "ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1] = 1 return ret", "for sentence in self.__sentences: for word in sentence: if word in self.__data: self.__count[word]", "numpy as np import pickle class onehot: def __init__(self, sentences): self.__sentences = sentences", "self.__count = {} self.__build() def __build(self): self.__word_num = 1 for sentence in self.__sentences:", "= np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1] = 1 return ret def", "def __build(self): self.__word_num = 1 for sentence in self.__sentences: for word in sentence:", "self.__word_num - 1 def get_word_frequency(self, word): if word not in self.__data: print('Error! The", "in self.__data: print('Error! The word not in it\\'s map!') else: ret = np.zeros((self.__word_num", "return self.__count[word] def get_index_of_word(self, word): if word not in self.__data: print('Error! 
The word", "self.__data[word] = self.__word_num self.__word_num += 1 def __getitem__(self, word): if word not in", "ret def get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self, word): if word not", "as np import pickle class onehot: def __init__(self, sentences): self.__sentences = sentences self.__data", "np import pickle class onehot: def __init__(self, sentences): self.__sentences = sentences self.__data =", "= 1 return ret def get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self, word):", "The word not in it\\'s map!') else: return self.__count[word] def get_index_of_word(self, word): if", "in it\\'s map!') else: return self.__count[word] def get_index_of_word(self, word): if word not in", "else: return self.__count[word] def get_index_of_word(self, word): if word not in self.__data: print('Error! The", "def get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self, word): if word not in", "1 for sentence in self.__sentences: for word in sentence: if word in self.__data:", "word not in self.__data: print('Error! The word not in it\\'s map!') else: ret", "else: ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1] = 1 return", "in it\\'s map!') else: ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1]", "self.__count[word] += 1 else: self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num += 1", "sentences self.__data = {} self.__count = {} self.__build() def __build(self): self.__word_num = 1", "self.__data = {} self.__count = {} self.__build() def __build(self): self.__word_num = 1 for", "1 def get_word_frequency(self, word): if word not in self.__data: print('Error! The word not", "if word not in self.__data: print('Error! The word not in it\\'s map!') else:", "__init__(self, sentences): self.__sentences = sentences self.__data = {} self.__count = {} self.__build() def", "print('Error! 
The word not in it\\'s map!') else: return self.__count[word] def get_index_of_word(self, word):", "map!') else: ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1] = 1", "self.__sentences: for word in sentence: if word in self.__data: self.__count[word] += 1 else:", "import pickle class onehot: def __init__(self, sentences): self.__sentences = sentences self.__data = {}", "not in self.__data: print('Error! The word not in it\\'s map!') else: return self.__data[word]", "word in sentence: if word in self.__data: self.__count[word] += 1 else: self.__count[word] =", "in self.__sentences: for word in sentence: if word in self.__data: self.__count[word] += 1", "word not in it\\'s map!') else: ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word]", "not in it\\'s map!') else: ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] -", "def __init__(self, sentences): self.__sentences = sentences self.__data = {} self.__count = {} self.__build()", "__build(self): self.__word_num = 1 for sentence in self.__sentences: for word in sentence: if", "self.__word_num = 1 for sentence in self.__sentences: for word in sentence: if word", "map!') else: return self.__count[word] def get_index_of_word(self, word): if word not in self.__data: print('Error!", "self.__word_num self.__word_num += 1 def __getitem__(self, word): if word not in self.__data: print('Error!", "__getitem__(self, word): if word not in self.__data: print('Error! The word not in it\\'s", "def get_index_of_word(self, word): if word not in self.__data: print('Error! The word not in", "self.__sentences = sentences self.__data = {} self.__count = {} self.__build() def __build(self): self.__word_num", "1 def __getitem__(self, word): if word not in self.__data: print('Error! 
The word not", "ret[self.__data[word] - 1] = 1 return ret def get_voca_size(self): return self.__word_num - 1", "self.__count[word] = 1 self.__data[word] = self.__word_num self.__word_num += 1 def __getitem__(self, word): if", "return ret def get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self, word): if word", "it\\'s map!') else: ret = np.zeros((self.__word_num - 1, 1)) ret[self.__data[word] - 1] =", "sentences): self.__sentences = sentences self.__data = {} self.__count = {} self.__build() def __build(self):", "get_voca_size(self): return self.__word_num - 1 def get_word_frequency(self, word): if word not in self.__data:", "self.__data: print('Error! The word not in it\\'s map!') else: return self.__count[word] def get_index_of_word(self,", "self.__data: print('Error! The word not in it\\'s map!') else: ret = np.zeros((self.__word_num -", "word not in it\\'s map!') else: return self.__count[word] def get_index_of_word(self, word): if word", "in sentence: if word in self.__data: self.__count[word] += 1 else: self.__count[word] = 1", "1)) ret[self.__data[word] - 1] = 1 return ret def get_voca_size(self): return self.__word_num -", "self.__data: print('Error! The word not in it\\'s map!') else: return self.__data[word] - 1", "get_word_frequency(self, word): if word not in self.__data: print('Error! The word not in it\\'s", "self.__count[word] def get_index_of_word(self, word): if word not in self.__data: print('Error! The word not", "pickle class onehot: def __init__(self, sentences): self.__sentences = sentences self.__data = {} self.__count", "self.__word_num += 1 def __getitem__(self, word): if word not in self.__data: print('Error! The", "not in self.__data: print('Error! The word not in it\\'s map!') else: return self.__count[word]", "print('Error! 
The word not in it\\'s map!') else: ret = np.zeros((self.__word_num - 1,", "{} self.__count = {} self.__build() def __build(self): self.__word_num = 1 for sentence in", "for word in sentence: if word in self.__data: self.__count[word] += 1 else: self.__count[word]" ]
[ "from app import app # Change working directory os.chdir(os.path.dirname(__file__)) # Run application app.run(debug=True)", "import os from app import app # Change working directory os.chdir(os.path.dirname(__file__)) # Run", "os from app import app # Change working directory os.chdir(os.path.dirname(__file__)) # Run application", "#! /usr/bin/env python2.7 import os from app import app # Change working directory", "python2.7 import os from app import app # Change working directory os.chdir(os.path.dirname(__file__)) #", "/usr/bin/env python2.7 import os from app import app # Change working directory os.chdir(os.path.dirname(__file__))" ]
[ "accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1)", "= session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2,", "session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions", "def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession", "patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session", "restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) self.assertEqual([], restore.perfect_matches)", "= Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions =", "import delete_accessions from patsy.model import Base import unittest from patsy.model import Accession from", "self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore", "self.assertEqual(accession, 
restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) self.assertEqual([],", "session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions", "Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session =", "test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1,", "session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions =", "from .utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session", "session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first())", "create_perfect_match from patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine()", "patsy.delete_accessions import delete_accessions from patsy.model import Base import unittest from patsy.model import Accession", "accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") 
session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count())", "accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found", "session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count())", "= session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2", "def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2)", "= session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def", "AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session,", "self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build()", "restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions)", 
"new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore,", "session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1,", ".utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session class", "setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession =", "session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1,", "self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession)", "accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\")", "patsy.model import Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches", "delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def 
test_batch_with_accession_with_perfect_match(self): session", "<filename>tests/test_delete_accessions.py<gh_stars>0 import patsy.database from patsy.delete_accessions import delete_accessions from patsy.model import Base import unittest", "Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions", "AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions", "accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2,", "AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session,", "accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self):", "Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def", "import find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind()", "\"batch_to_delete\") 
session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1", "accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession", "patsy.model import Base import unittest from patsy.model import Accession from .utils import create_test_engine,", "session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self):", "= Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session,", "\"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session =", "import patsy.database from patsy.delete_accessions import delete_accessions from patsy.model import Base import unittest from", "find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session,", "create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build()", "len(restore.perfect_matches)) self.assertEqual(accession, 
restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count())", "= AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found =", "accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches))", "accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build()", "from patsy.model import Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import", "session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches))", "self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def", "from patsy.model import Base import unittest from patsy.model import Accession from .utils import", "AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions =", "delete_accessions(session, \"batch_to_delete\") session.commit() accessions = 
session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session()", "create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def", "Base import unittest from patsy.model import Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match", "self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0,", "find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine)", "accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session =", "accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\")", "session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 =", "session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession)", 
"accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count())", "engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession)", "= patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self):", "len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions", "unittest from patsy.model import Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches", "session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build()", "TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session()", "self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions =", "AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session = 
patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self):", "= AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count())", "from patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine", "= AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession) self.assertEqual(2, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit()", "patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine =", "= Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions =", "= AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions", "session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 =", "accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore =", "def test_batch_with_accession_with_perfect_match(self): session = Session() 
accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore)", "self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0, accessions.count()) def test_batch_with_two_accessions_in_different_batches(self): session", "session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session()", "accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession)", "self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit()", "create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found))", "patsy.database from patsy.delete_accessions import delete_accessions from patsy.model import Base import unittest from patsy.model", "test_batch_with_accession_with_perfect_match(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit()", "= session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, 
len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession,", "len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0]) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession)", "delete_accessions from patsy.model import Base import unittest from patsy.model import Accession from .utils", "Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() restore = create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession)", "import Base import unittest from patsy.model import Accession from .utils import create_test_engine, AccessionBuilder,", "Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\")", "class TestDeleteAccession(unittest.TestCase): def setUp(self): create_test_engine() engine = Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session =", "accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit()", "= Session().get_bind() Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit()", "import Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session", "import unittest from 
patsy.model import Accession from .utils import create_test_engine, AccessionBuilder, create_perfect_match from", "session.add(accession) session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession)", "session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1,", "= create_perfect_match(accession) session.add(accession) session.add(restore) session.commit() accessions = session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1,", "session.query(Accession) new_matches_found = find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0])", "session.commit() accessions = session.query(Accession) self.assertEqual(1, accessions.count()) delete_accessions(session, \"batch_to_delete\") session.commit() accessions = session.query(Accession) self.assertEqual(0,", "= session.query(Accession) self.assertEqual(1, accessions.count()) self.assertEqual(accession2, accessions.first()) def test_batch_with_accession_with_perfect_match(self): session = Session() accession =", "def test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions = session.query(Accession)", "Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit() accessions = session.query(Accession)", 
"test_batch_with_two_accessions_in_different_batches(self): session = Session() accession1 = AccessionBuilder().set_batch(\"batch_to_delete\").build() accession2 = AccessionBuilder().set_batch(\"batch_to_preserve\").build() session.add(accession1) session.add(accession2) session.commit()", "from patsy.delete_accessions import delete_accessions from patsy.model import Base import unittest from patsy.model import", "= find_perfect_matches(session, accessions) self.assertEqual(1, len(new_matches_found)) self.assertEqual(1, len(accession.perfect_matches)) self.assertEqual(1, len(restore.perfect_matches)) self.assertEqual(accession, restore.perfect_matches[0]) self.assertEqual(restore, accession.perfect_matches[0])", "import create_test_engine, AccessionBuilder, create_perfect_match from patsy.perfect_matches import find_perfect_matches Session = patsy.database.Session class TestDeleteAccession(unittest.TestCase):", "Base.metadata.create_all(engine) def test_batch_with_one_accession(self): session = Session() accession = AccessionBuilder().set_batch(\"batch_to_delete\").build() session.add(accession) session.commit() accessions =" ]
[ "but you would need to construct data files that match the structure of", "self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that represent the mapping of", ": str ICD10 code string. associated_string : str String defining code in codebook.", "file with open(hierarchyFile,'rb') as f: currentParentList = [] for line in f: line=self._convertToUnicode(line)", "ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional First encoding. The default is", "else: parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent']", "if providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try:", "chapter heading for input code Parameters ---------- code : str ICD10 code. Returns", "if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x", "ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional First encoding. The", "the input code. Parameters ---------- parent_code : str ICD10 string for parent code.", "hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to search", "None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding =", "the code of interest. \"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try:", "= [x.code for x in self.UnusableICDCodes] for del_code in all_del_codes: if del_code in", "None. Returns ------- None. 
\"\"\" if (primaryEncoding is not None) or (secondaryEncoding is", "_findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1] in code: return parentList else: parentList.pop()", "include periods (ie J101 not J10.1) Returns ------- terminal_code_list : list List of", "bool, optional Indicates if code is parent. The default is None. Raises ------", "path to another ICD10 file. ICD9 codebook could be used instead, but you", "is None. Raises ------ ValueError ValueError raised if unable to parse some line.", "check if there is 1:1 mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary", "1:1 mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct']", "primaryCode : str Diagnostic code to be converted . includeRelationship : bool, optional.", "self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0", "self.UsableICDCodes] marker_list_unusable = [x.code for x in self.UnusableICDCodes] for del_code in all_del_codes: if", "= [x.code for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code]", "has not been tested. The default is None. chapterFile : str, optional File", "parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns full code object (not just string)", "code class for input string. 
\"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else:", "By default, the package ships with 2018 version of ICD10-CM and 2020 version", "upgrade to 2019 (or downgrade for that matter) by specifying the path to", "return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10 codebook. It stores", "ICD10 code. Returns ------- str ICD10 chapter. \"\"\" code = code.replace('.','') currentCode =", "2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with", "version of ICD10 from UK Biobank. You can upgrade to 2019 (or downgrade", "f: f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])]", "creates a map between ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities,", "all ICD10 codes that are children of the input code. Parameters ---------- parent_code", "_deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None:", "an alternative code chapter file (ie main groups of codes). Again, this may", "chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f: f.readline()", "\"\"\" Class that manipulates the ICD10 codebook. 
It stores the codebook as a", "#first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with", "start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with", "else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that represent the mapping", "(UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional First encoding. The default is None.", "is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all", "The default is None. Raises ------ ValueError ValueError raised if unable to parse", "self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount =", "self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for", "not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def", "two ICD Utilities with at least some shared codes by taking advantage of", 
"useICD10UKBB : bool, optional Specifies class to use the UK Biobank version of", "is terminal (no children). parent_code : bool, optional Indicates if code is parent.", "and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with 2018 version of", "path to an alternative code hierarchy file. This may (unlikely) work with other", "to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes]", "ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check if", "load hierarchy file with open(hierarchyFile,'rb') as f: currentParentList = [] for line in", "translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\"", "encoding. The default is None. Returns ------- None. 
\"\"\" if (primaryEncoding is not", "if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick", "self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code]", "return parentList def ReturnCodeObject(self,code): \"\"\" Returns full code object (not just string) for", "line of interest. Returns ------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else:", "codebook could be used instead, but you would need to construct data files", "unable to parse some line. Prints out the line of interest. Returns -------", "self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if", "pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\"", "------- ICDCode ICD10 code class for input string. 
\"\"\" if code in self.setOfUnusableCodes:", "self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in", "in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD", "search full tree for codes. #Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set()", "child_code_names = [x.code for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary", "currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f: f.readline() for line in f: line=self._convertToUnicode(line)", "chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1]))", "Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two ICD Utilities", "Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary", "ICDUtilities, optional First encoding. The default is None. 
secondaryEncoding : ICDUtilities, optional Second", "chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb') as f:", "node: child = {}, parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self):", "return self.associated_string def __repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def", "least some shared codes by taking advantage of shared hierarchical structure. If primaryEncoding", "primaryEncoding : ICDUtilities, optional First encoding. The default is None. secondaryEncoding : ICDUtilities,", "code. Parameters ---------- code : str ICD10 code string. Returns ------- ICDCode ICD10", "UK Biobank. You can upgrade to 2019 (or downgrade for that matter) by", "Created on Tue Jul 9 08:42:18 2019 @author: davidblair \"\"\" from unidecode import", "2019 (or downgrade for that matter) by specifying the path to another ICD10", "Returns ------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None:", "------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile", "Returns ------- ICDCode ICD10 code class for input string. \"\"\" if code in", "to return the relationship type in addition to code. 
The default is False.", "marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code", "self.parent_code = parent_code if parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise", "hierarchyFile : str, optional File path to an alternative code hierarchy file. This", "chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to search full tree for", "\"\"\" Returns full code object (not just string) for a given code. Parameters", "import pkg_resources import string import pickle import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi',", "ICDUtilities, optional Second encoding. The default is None. Returns ------- None. \"\"\" if", "translation map between two ICD Utilities with at least some shared codes by", "ICD10 code string. Returns ------- ICDCode ICD10 code class for input string. \"\"\"", "expects flat two text files: 1) ICD10_Chapters.txt--chapter heading for all the codes. Manually", "raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount", "Biobank. 
You can upgrade to 2019 (or downgrade for that matter) by specifying", "list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the", "try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList) > 0:", "code. Parameters ---------- parent_code : str ICD10 string for parent code. Do not", "for x in enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList", "optional First encoding. The default is None. secondaryEncoding : ICDUtilities, optional Second encoding.", "currentParentList = [] for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()]", "= code self.associated_string = associated_string self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code", "used instead, but you would need to construct data files that match the", "ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter", "the ICD code and all children (if exist) from data structure. Parameters ----------", "__init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10 codebook. It stores the codebook as", "that matter) by specifying the path to another ICD10 file. ICD9 codebook could", "in self.primaryEncoding.UsableICDCodes: #first check if there is 1:1 mapping between codes if code.code", "children). 
parent_code : bool, optional Indicates if code is parent. The default is", "= associated_string self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code if", "self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def", "len(code.child_codes)>0: child_code_names = [x.code for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0:", "\"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt'", "another ICD10 file. ICD9 codebook could be used instead, but you would need", "in code: return parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns full", "Biobank version of ICD10 (not ICD10-CM). The default is False. hierarchyFile : str,", "Raises ------ ValueError ValueError raised if unable to parse some line. Prints out", "The default is None. Returns ------- None. \"\"\" if (primaryEncoding is not None)", "may (unlikely) work with other encodings but has not been tested. The default", "None. \"\"\" self.code = code self.associated_string = associated_string self.parent_code = parent_code self.child_codes =", "ValueError If unable to add child codes to known parent code. Returns -------", "that are children to parent code. 
\"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child", "the ICD10 files. Parameters ---------- useICD10UKBB : bool, optional Specifies class to use", "None) and (secondaryEncoding is not None), \"Must specify primary and secondary encoding if", "that are children of the input code. Parameters ---------- parent_code : str ICD10", "code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return", "that manipulates the ICD10 codebook. It stores the codebook as a simple tree", "of interest. Returns ------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt'", "to avoid having to search full tree for codes. #Full list of linked", "davidblair \"\"\" from unidecode import unidecode import pkg_resources import string import pickle import", "Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check if there is 1:1 mapping between", "the UK Biobank version of ICD10 (not ICD10-CM). The default is False. hierarchyFile", "1) ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed. 2) ICD10 codes and", "code to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in", "not been tested. The default is None. 
chapterFile : str, optional File path", "currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary", "f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]]", "else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns full code object (not just", "bool, optional. Specicies whether to return the relationship type in addition to code.", "allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True)", "ValueError(\"Attempting to add children to terminal node: child = {}, parent = {}\".format(code,parent_code.code))", "bool, optional Specifies class to use the UK Biobank version of ICD10 (not", "children of the input code. Parameters ---------- parent_code : str ICD10 string for", "all the codes. Manually constructed. 
2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By", "with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True)", "None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable = [x.code for", "base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while", "Class that manipulates the ICD10 codebook. It stores the codebook as a simple", "is None. Raises ------ ValueError If unable to add child codes to known", "currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes:", "parent code. Returns ------- None. \"\"\" self.code = code self.associated_string = associated_string self.parent_code", "structure. If primaryEncoding and secondaryEncoding are unspecified, class creates a map between ICD10-CM", "alternative code hierarchy file. This may (unlikely) work with other encodings but has", "reference, to avoid having to search full tree for codes. #Full list of", "self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code in", "---------- primaryCode : str Diagnostic code to be converted . includeRelationship : bool,", "advantage of shared hierarchical structure. 
If primaryEncoding and secondaryEncoding are unspecified, class creates", "code of interest. \"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try: return", "08:42:18 2019 @author: davidblair \"\"\" from unidecode import unidecode import pkg_resources import string", "one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb')", "Returns ------- None. \"\"\" self.code = code self.associated_string = associated_string self.parent_code = parent_code", "the new encoding system. Parameters ---------- primaryCode : str Diagnostic code to be", "self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list", "hierarchy file with open(hierarchyFile,'rb') as f: currentParentList = [] for line in f:", "default is False. hierarchyFile : str, optional File path to an alternative code", "str Diagnostic code to be converted . includeRelationship : bool, optional. Specicies whether", "a given code. Parameters ---------- code : str ICD10 code string. Returns -------", "known parent code. Returns ------- None. \"\"\" self.code = code self.associated_string = associated_string", "child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the", "default is None. chapterFile : str, optional File path to an alternative code", "map between ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional First", "(ie main groups of codes). 
Again, this may work with other encodings but", "from data structure. Parameters ---------- del_code : str ICD10 code to delete Returns", "encodings but has not been tested. The default is None. Raises ------ ValueError", "manipulates the ICD10 codebook. It stores the codebook as a simple tree (stored", "construct data files that match the structure of the ICD10 files. Parameters ----------", "other encodings but has not been tested. The default is None. chapterFile :", "_buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check if there is", "self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0", "but has not been tested. The default is None. chapterFile : str, optional", "input code Parameters ---------- code : str ICD10 code. Returns ------- str ICD10", "ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters", ": str, optional File path to an alternative code chapter file (ie main", "associated_string self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code", "is None. chapterFile : str, optional File path to an alternative code chapter", "the class, expects flat two text files: 1) ICD10_Chapters.txt--chapter heading for all the", "mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else:", "are children to parent code. 
\"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in", "exist) from data structure. Parameters ---------- del_code : str ICD10 code to delete", "else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code", "of shared hierarchical structure. If primaryEncoding and secondaryEncoding are unspecified, class creates a", "the codebook as a simple tree (stored as a list called ICDCodes). To", "currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode)", "children to parent code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes:", "utf-8 -*- \"\"\" Created on Tue Jul 9 08:42:18 2019 @author: davidblair \"\"\"", "#now load hierarchy file with open(hierarchyFile,'rb') as f: currentParentList = [] for line", "all children (if exist) from data structure. Parameters ---------- del_code : str ICD10", "chapter. \"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not None:", ": str ICD10 code string. Returns ------- ICDCode ICD10 code class for input", "the ICD10 codebook. It stores the codebook as a simple tree (stored as", "icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration:", "secondaryEncoding : ICDUtilities, optional Second encoding. The default is None. 
Returns ------- None.", "pickle import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self,", "= parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is not", "for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None):", "string import pickle import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode:", "Returns all ICD10 codes that are children of the input code. Parameters ----------", ": bool, optional Specifies class to use the UK Biobank version of ICD10", "alternative code chapter file (ie main groups of codes). Again, this may work", "is not None), \"Must specify primary and secondary encoding if providing one or", "input code. Parameters ---------- parent_code : str ICD10 string for parent code. Do", "self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is not None: if", "as a list called ICDCodes). To initialize the class, expects flat two text", "translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between", "<= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1])", "codes). 
Again, this may work with other encodings but has not been tested.", "for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount", "code. Returns ------- str ICD10 chapter. \"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code)", "heading for input code Parameters ---------- code : str ICD10 code. Returns -------", "files that match the structure of the ICD10 files. Parameters ---------- useICD10UKBB :", "parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3])", "ICDCode ICD10 code class for input string. \"\"\" if code in self.setOfUnusableCodes: return", "ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1])", "if there is 1:1 mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code]", "groups of codes). Again, this may work with other encodings but has not", "of ICD10-CM and 2020 version of ICD10 from UK Biobank. You can upgrade", "ICD10 (not ICD10-CM). The default is False. 
hierarchyFile : str, optional File path", "interest. Returns ------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if", "map between two ICD Utilities with at least some shared codes by taking", "codes that represent the mapping of the primary code to the new encoding", "is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list", "code : str ICD10 code string. Returns ------- ICDCode ICD10 code class for", "as f: currentParentList = [] for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()]", "False. Returns ------- set Set of codes aligned to the code of interest.", "Set of codes aligned to the code of interest. \"\"\" if includeRelationship: look_up=['Secondary", "optional File path to an alternative code hierarchy file. This may (unlikely) work", "Again, this may work with other encodings but has not been tested. The", "Indicates whether code is terminal (no children). parent_code : bool, optional Indicates if", "heading for all the codes. Manually constructed. 2) ICD10 codes and hierarchy: icd10cm_order_2018.txt,", "self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount", "text files: 1) ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed. 2) ICD10", "of the primary code to the new encoding system. Parameters ---------- primaryCode :", "set of codes that represent the mapping of the primary code to the", "code string. associated_string : str String defining code in codebook. 
is_terminal : bool", "= self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <=", "ICD10 from UK Biobank. You can upgrade to 2019 (or downgrade for that", "self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that", "line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load", "of codes). Again, this may work with other encodings but has not been", "parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for", "of codes aligned to the code of interest. 
\"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship']", "del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def", "code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the", "match the structure of the ICD10 files. Parameters ---------- useICD10UKBB : bool, optional", "translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code for x", "the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f:", "Prints out the line of interest. Returns ------- None. 
\"\"\" if hierarchyFile==None: if", "translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two ICD", "def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except", "ICD10 code to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x", "= {}, parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self): return self.code", "is not None): assert (secondaryEncoding is not None) and (secondaryEncoding is not None),", "-*- coding: utf-8 -*- \"\"\" Created on Tue Jul 9 08:42:18 2019 @author:", "codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[]", "not None), \"Must specify primary and secondary encoding if providing one or the", "represent the mapping of the primary code to the new encoding system. Parameters", "not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children to terminal", "str ICD10 string for parent code. 
Do not include periods (ie J101 not", "self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable()", "unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base)", "code: return parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns full code", "------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable = [x.code", "return the relationship type in addition to code. The default is False. Returns", "-*- \"\"\" Created on Tue Jul 9 08:42:18 2019 @author: davidblair \"\"\" from", "parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children to terminal node: child = {},", "{}, parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self): return self.code class", "chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb') as f: currentParentList", "---------- parent_code : str ICD10 string for parent code. 
Do not include periods", "pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal,", "code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode =", "the line of interest. Returns ------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt'", "code in codebook. is_terminal : bool Indicates whether code is terminal (no children).", "open(hierarchyFile,'rb') as f: currentParentList = [] for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()]", "specifying the path to another ICD10 file. ICD9 codebook could be used instead,", "---------- useICD10UKBB : bool, optional Specifies class to use the UK Biobank version", "---------- del_code : str ICD10 code to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code)", "if len(code.child_codes)>0: child_code_names = [x.code for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if", "in self.UsableICDCodes] marker_list_unusable = [x.code for x in self.UnusableICDCodes] for del_code in all_del_codes:", "delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable", "try: icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])] except", "may work with other encodings but has not been tested. The default is", "(not ICD10-CM). The default is False. hierarchyFile : str, optional File path to", "None. Raises ------ ValueError If unable to add child codes to known parent", "UK Biobank version of ICD10 (not ICD10-CM). The default is False. hierarchyFile :", "string. 
\"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code):", "whether code is terminal (no children). parent_code : bool, optional Indicates if code", "icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with 2018 version of ICD10-CM and", "currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1", "code : str ICD10 code. Returns ------- str ICD10 chapter. \"\"\" code =", "codes. Manually constructed. 2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the", "def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code : str ICD10 code", "ICD10 file. ICD9 codebook could be used instead, but you would need to", "translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two ICD Utilities with at", "Parameters ---------- primaryCode : str Diagnostic code to be converted . 
includeRelationship :", "else: raise ValueError(\"Attempting to add children to terminal node: child = {}, parent", "del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for input code", "del_code_list=[del_code] if parent_code is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes:", "primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError:", "def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return", "marker_list_unusable = [x.code for x in self.UnusableICDCodes] for del_code in all_del_codes: if del_code", "new encoding system. Parameters ---------- primaryCode : str Diagnostic code to be converted", "string) for a given code. Parameters ---------- code : str ICD10 code string.", "default is False. Returns ------- set Set of codes aligned to the code", "Returns ------- terminal_code_list : list List of ICD10 codes that are children to", "intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList) > 0: if", "for code in self.primaryEncoding.UsableICDCodes: #first check if there is 1:1 mapping between codes", "structure of the ICD10 files. Parameters ---------- useICD10UKBB : bool, optional Specifies class", "Second encoding. The default is None. Returns ------- None. \"\"\" if (primaryEncoding is", "0: if parentList[-1] in code: return parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code):", "to construct data files that match the structure of the ICD10 files. 
Parameters", "#first check if there is 1:1 mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes:", "for input code Parameters ---------- code : str ICD10 code. Returns ------- str", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Tue Jul 9", "bool Indicates whether code is terminal (no children). parent_code : bool, optional Indicates", "in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code", "codebook. It stores the codebook as a simple tree (stored as a list", "Specicies whether to return the relationship type in addition to code. The default", "parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self): return self.code class ICDUtilities:", "len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None):", "self.associated_string = associated_string self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code", "Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary", "self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is", "associated_string : str String 
defining code in codebook. is_terminal : bool Indicates whether", "optional Specifies class to use the UK Biobank version of ICD10 (not ICD10-CM).", "def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is not", "This may (unlikely) work with other encodings but has not been tested. The", "currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f: f.readline() for line", "in codebook. is_terminal : bool Indicates whether code is terminal (no children). parent_code", "self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that are children", "else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10 codebook.", "aligned to the code of interest. \"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary", "------- set Set of codes aligned to the code of interest. \"\"\" if", "return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that are children of", "of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters", "self.UnusableICDCodes] for del_code in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code))", "hierarchy file. 
This may (unlikely) work with other encodings but has not been", "currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb') as f: currentParentList = [] for", "hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with 2018 version of ICD10-CM", ": str ICD10 code. Returns ------- str ICD10 chapter. \"\"\" code = code.replace('.','')", "linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[]", "parse some line. Prints out the line of interest. Returns ------- None. \"\"\"", "on Tue Jul 9 08:42:18 2019 @author: davidblair \"\"\" from unidecode import unidecode", "self.code = code self.associated_string = associated_string self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal", "for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes]", "parent code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True:", "unidecode import pkg_resources import string import pickle import pandas as pd ICD_PATH =", "codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent =", "\"\"\" Parameters ---------- code : str ICD10 code string. 
associated_string : str String", "self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD code", "ICD10-CM). The default is False. hierarchyFile : str, optional File path to an", "optional. Specicies whether to return the relationship type in addition to code. The", "if code is parent. The default is None. Raises ------ ValueError If unable", "x in enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter]", "constructed. 2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships", "Raises ------ ValueError If unable to add child codes to known parent code.", "ICDCodes). To initialize the class, expects flat two text files: 1) ICD10_Chapters.txt--chapter heading", "interest. \"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try: return self.EncodingCoversionTable.loc[primaryCode][look_up] except", "String defining code in codebook. is_terminal : bool Indicates whether code is terminal", "0 with open(chapterFile,'rb') as f: f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)]", "= pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def", "parent_code : bool, optional Indicates if code is parent. 
The default is None.", "terminal node: child = {}, parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def", "ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that represent the mapping of the primary", "del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None: parent_code.child_codes.remove(del_code_obj)", "the relationship type in addition to code. The default is False. Returns -------", "instead, but you would need to construct data files that match the structure", "__init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two ICD Utilities with at least some", "str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that are children of the", "translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes:", "ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code : str ICD10", "encoding system. Parameters ---------- primaryCode : str Diagnostic code to be converted .", "x in self.UsableICDCodes] marker_list_unusable = [x.code for x in self.UnusableICDCodes] for del_code in", "encodings but has not been tested. The default is None. chapterFile : str,", "Parameters ---------- code : str ICD10 code. Returns ------- str ICD10 chapter. \"\"\"", "parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter", "set Set of codes aligned to the code of interest. 
\"\"\" if includeRelationship:", "is not None) and (secondaryEncoding is not None), \"Must specify primary and secondary", "(if exist) from data structure. Parameters ---------- del_code : str ICD10 code to", "codes by taking advantage of shared hierarchical structure. If primaryEncoding and secondaryEncoding are", "full tree for codes. #Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[]", "load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb')", "(primaryEncoding is not None) or (secondaryEncoding is not None): assert (secondaryEncoding is not", "is False. Returns ------- set Set of codes aligned to the code of", "\"\"\" Returns all ICD10 codes that are children of the input code. Parameters", "self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary", ": str ICD10 string for parent code. Do not include periods (ie J101", "List of ICD10 codes that are children to parent code. \"\"\" all_child_codes =", "Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable =", "import pickle import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def", "= del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code", "None. 
Raises ------ ValueError ValueError raised if unable to parse some line. Prints", ": bool, optional. Specicies whether to return the relationship type in addition to", "f: currentParentList = [] for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()]", "x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child']", "class, expects flat two text files: 1) ICD10_Chapters.txt--chapter heading for all the codes.", "(ie J101 not J10.1) Returns ------- terminal_code_list : list List of ICD10 codes", "an alternative code hierarchy file. This may (unlikely) work with other encodings but", "class for input string. \"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return", "The default is None. Raises ------ ValueError If unable to add child codes", "2020 version of ICD10 from UK Biobank. 
You can upgrade to 2019 (or", "if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try: return self.EncodingCoversionTable.loc[primaryCode][look_up] except KeyError: return", "for x in self.UnusableICDCodes] for del_code in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code))", "stores the codebook as a simple tree (stored as a list called ICDCodes).", "coding: utf-8 -*- \"\"\" Created on Tue Jul 9 08:42:18 2019 @author: davidblair", "= pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ----------", "self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code):", "= [] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is not None: if parent_code.is_terminal==False:", "self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set", "tested. The default is None. 
chapterFile : str, optional File path to an", "except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else:", "def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that are children of the input", "codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with 2018 version", "#quick reference, to avoid having to search full tree for codes. #Full list", "\"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode", ": bool, optional Indicates if code is parent. The default is None. Raises", "in self.UnusableICDCodes] for del_code in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else:", "are unspecified, class creates a map between ICD10-CM and ICD10 (UKBB) Parameters ----------", "return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try:", "code. The default is False. Returns ------- set Set of codes aligned to", "tested. The default is None. 
Raises ------ ValueError ValueError raised if unable to", "a map between ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional", "while currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\"", "9 08:42:18 2019 @author: davidblair \"\"\" from unidecode import unidecode import pkg_resources import", "chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f: f.readline() for", "Jul 9 08:42:18 2019 @author: davidblair \"\"\" from unidecode import unidecode import pkg_resources", "line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb') as", "with open(chapterFile,'rb') as f: f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop", "line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0:", "between ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional First encoding.", "structure. Parameters ---------- del_code : str ICD10 code to delete Returns ------- None", "with other encodings but has not been tested. The default is None. chapterFile", "files. 
Parameters ---------- useICD10UKBB : bool, optional Specifies class to use the UK", "parentList[-1] in code: return parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns", "matter) by specifying the path to another ICD10 file. ICD9 codebook could be", "StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))]", "is not None) or (secondaryEncoding is not None): assert (secondaryEncoding is not None)", "add children to terminal node: child = {}, parent = {}\".format(code,parent_code.code)) def __str__(self):", "set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table", "else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to", "be used instead, but you would need to construct data files that match", "parentList def ReturnCodeObject(self,code): \"\"\" Returns full code object (not just string) for a", "for a given code. Parameters ---------- code : str ICD10 code string. Returns", "The default is None. secondaryEncoding : ICDUtilities, optional Second encoding. The default is", "for that matter) by specifying the path to another ICD10 file. ICD9 codebook", "None. 
\"\"\" if (primaryEncoding is not None) or (secondaryEncoding is not None): assert", "= ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of", "x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1", "Indicates if code is parent. The default is None. Raises ------ ValueError If", "code self.associated_string = associated_string self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code =", "the primary code to the new encoding system. Parameters ---------- primaryCode : str", "code. Returns ------- None. \"\"\" self.code = code self.associated_string = associated_string self.parent_code =", "0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if", "del_code : str ICD10 code to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable=", "\"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\"", "False. 
hierarchyFile : str, optional File path to an alternative code hierarchy file.", "parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal =", "default is None. Returns ------- None. \"\"\" if (primaryEncoding is not None) or", "def _findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1] in code: return parentList else:", "list List of ICD10 codes that are children to parent code. \"\"\" all_child_codes", "in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for", "_lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError:", "[x.code for x in self.UsableICDCodes] marker_list_unusable = [x.code for x in self.UnusableICDCodes] for", "ICD10 files. Parameters ---------- useICD10UKBB : bool, optional Specifies class to use the", "------- None. 
\"\"\" self.code = code self.associated_string = associated_string self.parent_code = parent_code self.child_codes", "pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code", "Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check if there is 1:1 mapping", "Returns the chapter heading for input code Parameters ---------- code : str ICD10", "= self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def", "to use the UK Biobank version of ICD10 (not ICD10-CM). The default is", "def ReturnCodeObject(self,code): \"\"\" Returns full code object (not just string) for a given", "codes. #Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first", "code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0:", "try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities()", "and secondary encoding if providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding", "of interest. \"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try: return self.EncodingCoversionTable.loc[primaryCode][look_up]", "codebook. is_terminal : bool Indicates whether code is terminal (no children). 
parent_code :", "is 1:1 mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])]", "there is 1:1 mapping between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary", "currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first", "data structure. Parameters ---------- del_code : str ICD10 code to delete Returns -------", "ICD10 codes that are children of the input code. Parameters ---------- parent_code :", "\"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try: return self.EncodingCoversionTable.loc[primaryCode][look_up] except KeyError:", "in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable))))", "child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\"", "as f: f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-')", "del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None: 
parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in", "\"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable = [x.code for x", "del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the", "ships with 2018 version of ICD10-CM and 2020 version of ICD10 from UK", "ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with 2018", "str ICD10 code string. Returns ------- ICDCode ICD10 code class for input string.", "a simple tree (stored as a list called ICDCodes). To initialize the class,", "the path to another ICD10 file. ICD9 codebook could be used instead, but", "with at least some shared codes by taking advantage of shared hierarchical structure.", "<reponame>daverblair/vlpi<gh_stars>1-10 #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Tue Jul", "None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code):", "not J10.1) Returns ------- terminal_code_list : list List of ICD10 codes that are", "Specifies class to use the UK Biobank version of ICD10 (not ICD10-CM). The", "enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList) >", "code string. Returns ------- ICDCode ICD10 code class for input string. \"\"\" if", "2018 version of ICD10-CM and 2020 version of ICD10 from UK Biobank. 
You", "could be used instead, but you would need to construct data files that", "but has not been tested. The default is None. Raises ------ ValueError ValueError", "avoid having to search full tree for codes. #Full list of linked codes", "intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def", "self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb') as f: currentParentList =", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Tue Jul 9 08:42:18", "enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1:", "DeleteCode(self,del_code): \"\"\" Removes the ICD code and all children (if exist) from data", "code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary", "shared hierarchical structure. 
If primaryEncoding and secondaryEncoding are unspecified, class creates a map", "len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x in", "intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal", "= self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that are", "parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns full code object (not", "in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table)", "def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that represent the mapping of the", "ICD code and all children (if exist) from data structure. Parameters ---------- del_code", "with open(hierarchyFile,'rb') as f: currentParentList = [] for line in f: line=self._convertToUnicode(line) parsedLine=[]", "str, optional File path to an alternative code chapter file (ie main groups", "def DeleteCode(self,del_code): \"\"\" Removes the ICD code and all children (if exist) from", "data files that match the structure of the ICD10 files. 
Parameters ---------- useICD10UKBB", "if parentList[-1] in code: return parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\"", "parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is not None:", "------- terminal_code_list : list List of ICD10 codes that are children to parent", "'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code :", "taking advantage of shared hierarchical structure. If primaryEncoding and secondaryEncoding are unspecified, class", "use the UK Biobank version of ICD10 (not ICD10-CM). The default is False.", "or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as", "as a simple tree (stored as a list called ICDCodes). To initialize the", "def __str__(self): return self.associated_string def __repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return", "downgrade for that matter) by specifying the path to another ICD10 file. ICD9", "Parameters ---------- useICD10UKBB : bool, optional Specifies class to use the UK Biobank", "object (not just string) for a given code. Parameters ---------- code : str", "= chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration: raise", "pkg_resources import string import pickle import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/')", "tree (stored as a list called ICDCodes). To initialize the class, expects flat", "children (if exist) from data structure. Parameters ---------- del_code : str ICD10 code", "not been tested. The default is None. 
Raises ------ ValueError ValueError raised if", "del_code in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable))))", "To initialize the class, expects flat two text files: 1) ICD10_Chapters.txt--chapter heading for", "parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else:", "and (secondaryEncoding is not None), \"Must specify primary and secondary encoding if providing", "ValueError raised if unable to parse some line. Prints out the line of", "code to the new encoding system. Parameters ---------- primaryCode : str Diagnostic code", "parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children to terminal node: child =", "Returns ------- str ICD10 chapter. \"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while", "that represent the mapping of the primary code to the new encoding system.", "If primaryEncoding and secondaryEncoding are unspecified, class creates a map between ICD10-CM and", "Builds translation map between two ICD Utilities with at least some shared codes", "hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having", "secondary encoding if providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is", "is_terminal : bool Indicates whether code is terminal (no children). 
parent_code : bool,", "of codes that represent the mapping of the primary code to the new", "\"\"\" from unidecode import unidecode import pkg_resources import string import pickle import pandas", "self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj =", "default is None. secondaryEncoding : ICDUtilities, optional Second encoding. The default is None.", "parent_code is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return", "\"\"\" if (primaryEncoding is not None) or (secondaryEncoding is not None): assert (secondaryEncoding", "self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount", "code Parameters ---------- code : str ICD10 code. Returns ------- str ICD10 chapter.", "parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal", "#Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load", ": list List of ICD10 codes that are children to parent code. 
\"\"\"", "del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for input", "code. Do not include periods (ie J101 not J10.1) Returns ------- terminal_code_list :", "parent_code if parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to", "class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for", "all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code)", "initialize the class, expects flat two text files: 1) ICD10_Chapters.txt--chapter heading for all", "chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f:", "if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except", "all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable = [x.code for x in", "currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that", "the codes. Manually constructed. 
2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default,", "Parameters ---------- del_code : str ICD10 code to delete Returns ------- None \"\"\"", "import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code,", "def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two ICD Utilities with at least", "---------- primaryEncoding : ICDUtilities, optional First encoding. The default is None. secondaryEncoding :", "\"\"\" Created on Tue Jul 9 08:42:18 2019 @author: davidblair \"\"\" from unidecode", "import unidecode import pkg_resources import string import pickle import pandas as pd ICD_PATH", "Tue Jul 9 08:42:18 2019 @author: davidblair \"\"\" from unidecode import unidecode import", "terminal (no children). parent_code : bool, optional Indicates if code is parent. The", "self.parent_code = parent_code self.child_codes = [] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is", "of ICD10 from UK Biobank. You can upgrade to 2019 (or downgrade for", "to an alternative code chapter file (ie main groups of codes). Again, this", "translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code for x in code.child_codes] allowed_child_codes =", "files: 1) ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed. 2) ICD10 codes", "system. Parameters ---------- primaryCode : str Diagnostic code to be converted . includeRelationship", "if parent_code is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code)", "The default is None. 
chapterFile : str, optional File path to an alternative", "= 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f: f.readline() for line in", "Do not include periods (ie J101 not J10.1) Returns ------- terminal_code_list : list", "some shared codes by taking advantage of shared hierarchical structure. If primaryEncoding and", "(secondaryEncoding is not None), \"Must specify primary and secondary encoding if providing one", "from UK Biobank. You can upgrade to 2019 (or downgrade for that matter)", "primary and secondary encoding if providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if", "self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj", "is None. Returns ------- None. 
\"\"\" if (primaryEncoding is not None) or (secondaryEncoding", "FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns", "else: if len(code.child_codes)>0: child_code_names = [x.code for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes)", "if (primaryEncoding is not None) or (secondaryEncoding is not None): assert (secondaryEncoding is", "codes to known parent code. Returns ------- None. \"\"\" self.code = code self.associated_string", "self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount =", "self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else:", "by taking advantage of shared hierarchical structure. If primaryEncoding and secondaryEncoding are unspecified,", "Returns ------- None. \"\"\" if (primaryEncoding is not None) or (secondaryEncoding is not", "(stored as a list called ICDCodes). 
To initialize the class, expects flat two", "= self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0]", "self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file", "primaryEncoding and secondaryEncoding are unspecified, class creates a map between ICD10-CM and ICD10", "addition to code. The default is False. Returns ------- set Set of codes", "for input string. \"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]]", "work with other encodings but has not been tested. The default is None.", "ValueError ValueError raised if unable to parse some line. Prints out the line", "------ ValueError If unable to add child codes to known parent code. Returns", "None), \"Must specify primary and secondary encoding if providing one or the other.\"", "ICD10-CM and 2020 version of ICD10 from UK Biobank. 
You can upgrade to", "while len(parentList) > 0: if parentList[-1] in code: return parentList else: parentList.pop() return", "= self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None: parent_code.child_codes.remove(del_code_obj) if", "all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys())", "__init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code : str ICD10 code string.", "ICD10 codes that are children to parent code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[]", "unidecode import unidecode import pkg_resources import string import pickle import pandas as pd", "encoding if providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None:", "None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes", "list called ICDCodes). To initialize the class, expects flat two text files: 1)", "code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code : str ICD10 code string. associated_string", "path to an alternative code chapter file (ie main groups of codes). 
Again,", "and secondaryEncoding are unspecified, class creates a map between ICD10-CM and ICD10 (UKBB)", "from unidecode import unidecode import pkg_resources import string import pickle import pandas as", "or (secondaryEncoding is not None): assert (secondaryEncoding is not None) and (secondaryEncoding is", "file. This may (unlikely) work with other encodings but has not been tested.", "(secondaryEncoding is not None) and (secondaryEncoding is not None), \"Must specify primary and", "to parse some line. Prints out the line of interest. Returns ------- None.", "J10.1) Returns ------- terminal_code_list : list List of ICD10 codes that are children", "useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid", "converted . includeRelationship : bool, optional. Specicies whether to return the relationship type", "return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code):", "int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap:", "if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children to terminal node: child", "not None) and (secondaryEncoding is not 
None), \"Must specify primary and secondary encoding", "ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1] in", "line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if", "if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class", "hierarchical structure. If primaryEncoding and secondaryEncoding are unspecified, class creates a map between", "code chapter file (ie main groups of codes). Again, this may work with", "add child codes to known parent code. Returns ------- None. \"\"\" self.code =", "codebook as a simple tree (stored as a list called ICDCodes). To initialize", "to the code of interest. \"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)'", "2019 @author: davidblair \"\"\" from unidecode import unidecode import pkg_resources import string import", "ICD10 code class for input string. 
\"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]]", "terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10", "you would need to construct data files that match the structure of the", "translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two", "def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for input code Parameters ---------- code", ": ICDUtilities, optional First encoding. The default is None. secondaryEncoding : ICDUtilities, optional", "to code. The default is False. Returns ------- set Set of codes aligned", "to add child codes to known parent code. Returns ------- None. \"\"\" self.code", "= code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if", "class creates a map between ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding :", "for x in self.UsableICDCodes] marker_list_unusable = [x.code for x in self.UnusableICDCodes] for del_code", "package ships with 2018 version of ICD10-CM and 2020 version of ICD10 from", "to an alternative code hierarchy file. This may (unlikely) work with other encodings", "not None): assert (secondaryEncoding is not None) and (secondaryEncoding is not None), \"Must", "[x.code for x in self.UnusableICDCodes] for del_code in all_del_codes: if del_code in self.setOfUnusableCodes:", "string for parent code. 
Do not include periods (ie J101 not J10.1) Returns", "of ICD10 codes that are children to parent code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes", "for all the codes. Manually constructed. 2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html", "not None) or (secondaryEncoding is not None): assert (secondaryEncoding is not None) and", "ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed. 2) ICD10 codes and hierarchy:", "code is parent. The default is None. Raises ------ ValueError If unable to", "this may work with other encodings but has not been tested. The default", "\"\"\" self.code = code self.associated_string = associated_string self.parent_code = parent_code self.child_codes = []", "in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code):", "and 2020 version of ICD10 from UK Biobank. You can upgrade to 2019", "just string) for a given code. Parameters ---------- code : str ICD10 code", "def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in", "del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for input code Parameters ----------", ": bool Indicates whether code is terminal (no children). 
parent_code : bool, optional", "default is None. Raises ------ ValueError If unable to add child codes to", "been tested. The default is None. chapterFile : str, optional File path to", "str ICD10 code to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code for", "in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class", "None) or (secondaryEncoding is not None): assert (secondaryEncoding is not None) and (secondaryEncoding", ". includeRelationship : bool, optional. Specicies whether to return the relationship type in", "if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes", "for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList):", "of the input code. Parameters ---------- parent_code : str ICD10 string for parent", "chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to search full tree", "[] for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList =", "codes aligned to the code of interest. 
\"\"\" if includeRelationship: look_up=['Secondary Code(s)','Relationship'] else:", "self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])]", "the mapping of the primary code to the new encoding system. Parameters ----------", "ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10 codes that are children of the input code.", "tree for codes. #Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={}", ": ICDUtilities, optional Second encoding. The default is None. Returns ------- None. \"\"\"", "a list called ICDCodes). To initialize the class, expects flat two text files:", "parent_code : str ICD10 string for parent code. Do not include periods (ie", "parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children", "codes that are children to parent code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for", "(or downgrade for that matter) by specifying the path to another ICD10 file.", "code hierarchy file. 
This may (unlikely) work with other encodings but has not", "== 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints)", "in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList)", "to 2019 (or downgrade for that matter) by specifying the path to another", "all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that", "Manually constructed. 2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package", "some line. Prints out the line of interest. Returns ------- None. \"\"\" if", "unable to add child codes to known parent code. Returns ------- None. \"\"\"", "marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code)", "string. Returns ------- ICDCode ICD10 code class for input string. \"\"\" if code", "default is None. 
Raises ------ ValueError ValueError raised if unable to parse some", "terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10 codebook. It", "to another ICD10 file. ICD9 codebook could be used instead, but you would", "self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable =", "_convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]):", "parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\"", "in enumerate(chapter_breakpoints) if intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if", "between two ICD Utilities with at least some shared codes by taking advantage", "codes that are children of the input code. Parameters ---------- parent_code : str", "str ICD10 code string. associated_string : str String defining code in codebook. 
is_terminal", "self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb') as f: currentParentList = []", "self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that represent", "to add children to terminal node: child = {}, parent = {}\".format(code,parent_code.code)) def", "self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes that represent the", "would need to construct data files that match the structure of the ICD10", "\"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else:", "be converted . includeRelationship : bool, optional. Specicies whether to return the relationship", ": str String defining code in codebook. is_terminal : bool Indicates whether code", "parent code. Do not include periods (ie J101 not J10.1) Returns ------- terminal_code_list", "https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html By default, the package ships with 2018 version of ICD10-CM and 2020", "self.setOfUnusableCodes=set() #first load the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0", "if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names", "None. 
secondaryEncoding : ICDUtilities, optional Second encoding. The default is None. Returns -------", "\"Must specify primary and secondary encoding if providing one or the other.\" self.primaryEncoding=primaryEncoding", "in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList)", "self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code", "Removes the ICD code and all children (if exist) from data structure. Parameters", ": str Diagnostic code to be converted . includeRelationship : bool, optional. Specicies", "input string. \"\"\" if code in self.setOfUnusableCodes: return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def", "return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for input code Parameters", "str, optional File path to an alternative code hierarchy file. This may (unlikely)", "File path to an alternative code chapter file (ie main groups of codes).", "= ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to search full tree for codes.", "ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to search full tree for codes. 
#Full", "self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD code and all", "if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to", "= line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy file with open(hierarchyFile,'rb')", "code : str ICD10 code string. associated_string : str String defining code in", "terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10 codebook. It stores the", "currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns", "Parameters ---------- code : str ICD10 code string. Returns ------- ICDCode ICD10 code", "not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code) return str(currentCode) def ReturnSubsumedTerminalCodes(self,parent_code): \"\"\" Returns all ICD10", "of ICD10 (not ICD10-CM). The default is False. hierarchyFile : str, optional File", "list(string.ascii_uppercase).index(letter)*base def _convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base)", "None. 
chapterFile : str, optional File path to an alternative code chapter file", "if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code", "self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code for", "includeRelationship : bool, optional. Specicies whether to return the relationship type in addition", "line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now load hierarchy", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Jul 9 08:42:18 2019", "line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0])", "return intVal def _findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1] in code: return", "_convertCodeToIntVal(self,code): intVal=0 for base,letter in enumerate(code[::-1]): try: intVal+=int(letter)*10**(base) except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal", "if del_code_obj.is_terminal==False: for child_code in del_code_obj.child_codes: 
del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns", "ICD9 codebook could be used instead, but you would need to construct data", "code in self.primaryEncoding.UsableICDCodes: #first check if there is 1:1 mapping between codes if", "------- str ICD10 chapter. \"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code", "(no children). parent_code : bool, optional Indicates if code is parent. The default", "between codes if code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent", "other encodings but has not been tested. The default is None. Raises ------", "if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def", "------ ValueError ValueError raised if unable to parse some line. Prints out the", "return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map between two ICD Utilities with", "out the line of interest. Returns ------- None. \"\"\" if hierarchyFile==None: if useICD10UKBB:", "for codes. #Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={} self.setOfUsableCodes=set() self.UnusableICDCodes=[] self.unusableCodeToIndexMap={} self.setOfUnusableCodes=set()", "for parent code. 
Do not include periods (ie J101 not J10.1) Returns -------", "except ValueError: intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1]", "simple tree (stored as a list called ICDCodes). To initialize the class, expects", "open(chapterFile,'rb') as f: f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop =", "if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates", "terminal_code_list : list List of ICD10 codes that are children to parent code.", "f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) ==", "self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys()) def _deleteCode(self,del_code): del_code_obj = self.ReturnCodeObject(del_code) parent_code =", "if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code) 
self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable)))) self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable)))) self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys()) self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys())", "that match the structure of the ICD10 files. Parameters ---------- useICD10UKBB : bool,", "ICD Utilities with at least some shared codes by taking advantage of shared", "x in self.UnusableICDCodes] for del_code in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code)", "Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation map", "to terminal node: child = {}, parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string", "if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference, to avoid having to search full", "at least some shared codes by taking advantage of shared hierarchical structure. If", "optional Indicates if code is parent. The default is None. Raises ------ ValueError", "children to terminal node: child = {}, parent = {}\".format(code,parent_code.code)) def __str__(self): return", "except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\"", "to the new encoding system. 
Parameters ---------- primaryCode : str Diagnostic code to", "translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code for x in code.child_codes]", "str ICD10 code. Returns ------- str ICD10 chapter. \"\"\" code = code.replace('.','') currentCode", "child codes to known parent code. Returns ------- None. \"\"\" self.code = code", "\"\"\" Returns the chapter heading for input code Parameters ---------- code : str", "chapter file (ie main groups of codes). Again, this may work with other", "None): assert (secondaryEncoding is not None) and (secondaryEncoding is not None), \"Must specify", "if parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add", "---------- code : str ICD10 code string. associated_string : str String defining code", "J101 not J10.1) Returns ------- terminal_code_list : list List of ICD10 codes that", "If unable to add child codes to known parent code. Returns ------- None.", "def __repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base", "Returns full code object (not just string) for a given code. Parameters ----------", "You can upgrade to 2019 (or downgrade for that matter) by specifying the", "self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter = chapter_list[next(x[0] for", "mapping of the primary code to the new encoding system. 
Parameters ---------- primaryCode", "child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading", "as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth')", "with other encodings but has not been tested. The default is None. Raises", "0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as f: f.readline() for line in f:", "in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1", "to known parent code. Returns ------- None. \"\"\" self.code = code self.associated_string =", "None. \"\"\" if hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile =", "translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])]", "are children of the input code. 
Parameters ---------- parent_code : str ICD10 string", "= self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return", "code is terminal (no children). parent_code : bool, optional Indicates if code is", "terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code] else: terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code) return terminal_code_list def", "of the ICD10 files. Parameters ---------- useICD10UKBB : bool, optional Specifies class to", "the package ships with 2018 version of ICD10-CM and 2020 version of ICD10", "code.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if", "version of ICD10-CM and 2020 version of ICD10 from UK Biobank. You can", "specify primary and secondary encoding if providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding", "intVal def _findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1] in code: return parentList", "primary code to the new encoding system. Parameters ---------- primaryCode : str Diagnostic", "class to use the UK Biobank version of ICD10 (not ICD10-CM). 
The default", "Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds", "translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary Code',drop=False,inplace=True) return translation_table def __init__(self,primaryEncoding=None,secondaryEncoding=None): \"\"\" Builds translation", "= 0 with open(chapterFile,'rb') as f: f.readline() for line in f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t')", "ICD10 chapter. \"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not", "line. Prints out the line of interest. Returns ------- None. \"\"\" if hierarchyFile==None:", "unspecified, class creates a map between ICD10-CM and ICD10 (UKBB) Parameters ---------- primaryEncoding", "\"\"\" Builds translation map between two ICD Utilities with at least some shared", "can upgrade to 2019 (or downgrade for that matter) by specifying the path", "---------- code : str ICD10 code string. Returns ------- ICDCode ICD10 code class", "file (ie main groups of codes). Again, this may work with other encodings", "optional Second encoding. The default is None. Returns ------- None. \"\"\" if (primaryEncoding", "string. associated_string : str String defining code in codebook. is_terminal : bool Indicates", "includeRelationship: look_up=['Secondary Code(s)','Relationship'] else: look_up='Secondary Code(s)' try: return self.EncodingCoversionTable.loc[primaryCode][look_up] except KeyError: return set([])", "type in addition to code. The default is False. 
Returns ------- set Set", "self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code", "else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD code and all children", "Parameters ---------- parent_code : str ICD10 string for parent code. Do not include", "optional File path to an alternative code chapter file (ie main groups of", "having to search full tree for codes. #Full list of linked codes self.UsableICDCodes=[]", "shared codes by taking advantage of shared hierarchical structure. If primaryEncoding and secondaryEncoding", "self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f)", "other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable", "is False. hierarchyFile : str, optional File path to an alternative code hierarchy", "------- None. \"\"\" if (primaryEncoding is not None) or (secondaryEncoding is not None):", "default, the package ships with 2018 version of ICD10-CM and 2020 version of", "to be converted . includeRelationship : bool, optional. 
Specicies whether to return the", "= code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is not None: currentCode = self.ReturnCodeObject(currentCode.parent_code.code)", "f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else:", "code object (not just string) for a given code. Parameters ---------- code :", "First encoding. The default is None. secondaryEncoding : ICDUtilities, optional Second encoding. The", "> 0: if parentList[-1] in code: return parentList else: parentList.pop() return parentList def", "str String defining code in codebook. is_terminal : bool Indicates whether code is", "raised if unable to parse some line. Prints out the line of interest.", "code and all children (if exist) from data structure. Parameters ---------- del_code :", "ICD10 string for parent code. Do not include periods (ie J101 not J10.1)", "def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None): \"\"\" Class that manipulates the ICD10 codebook. It stores the codebook", "been tested. The default is None. Raises ------ ValueError ValueError raised if unable", "class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check", "code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if child.is_terminal==True: terminal_code_list+=[child.code]", "(secondaryEncoding is not None): assert (secondaryEncoding is not None) and (secondaryEncoding is not", "called ICDCodes). To initialize the class, expects flat two text files: 1) ICD10_Chapters.txt--chapter", "in addition to code. 
The default is False. Returns ------- set Set of", "secondaryEncoding are unspecified, class creates a map between ICD10-CM and ICD10 (UKBB) Parameters", "code to be converted . includeRelationship : bool, optional. Specicies whether to return", "raise ValueError(\"Attempting to add children to terminal node: child = {}, parent =", "the chapters chapter_breakpoints=[] chapter_list=[] currentUsableCodeCount = 0 currentUnusableCodeCount = 0 with open(chapterFile,'rb') as", "version of ICD10 (not ICD10-CM). The default is False. hierarchyFile : str, optional", "ICD10 codebook. It stores the codebook as a simple tree (stored as a", "---------- code : str ICD10 code. Returns ------- str ICD10 chapter. \"\"\" code", "= parent_code if parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting", "pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False):", "@author: davidblair \"\"\" from unidecode import unidecode import pkg_resources import string import pickle", "self.associated_string def __repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base):", "child = {}, parent = {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self): return", "the structure of the ICD10 files. 
Parameters ---------- useICD10UKBB : bool, optional Specifies", "providing one or the other.\" self.primaryEncoding=primaryEncoding self.secondaryEncoding=secondaryEncoding if primaryEncoding is None: try: with", "whether to return the relationship type in addition to code. The default is", "Parameters ---------- code : str ICD10 code string. associated_string : str String defining", "ReturnCodeObject(self,code): \"\"\" Returns full code object (not just string) for a given code.", "+=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]]", "and ICD10 (UKBB) Parameters ---------- primaryEncoding : ICDUtilities, optional First encoding. The default", "has not been tested. The default is None. 
Raises ------ ValueError ValueError raised", "\"\"\" Returns set of codes that represent the mapping of the primary code", "intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))] self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount", "self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount self.setOfUsableCodes.add(parsedLine[1]) currentUsableCodeCount+=1 else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self):", "file. ICD9 codebook could be used instead, but you would need to construct", "The default is False. Returns ------- set Set of codes aligned to the", "parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names =", "The default is False. hierarchyFile : str, optional File path to an alternative", "assert (secondaryEncoding is not None) and (secondaryEncoding is not None), \"Must specify primary", "str ICD10 chapter. \"\"\" code = code.replace('.','') currentCode = self.ReturnCodeObject(code) while currentCode.parent_code is", "for child_code in del_code_obj.child_codes: del_code_list+=self._deleteCode(child_code.code) return del_code_list def AssignCodeToChapter(self,code): \"\"\" Returns the chapter", "defining code in codebook. is_terminal : bool Indicates whether code is terminal (no", ": str, optional File path to an alternative code hierarchy file. 
This may", "is None: try: with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding", "hierarchyFile==None: if useICD10UKBB: hierarchyFile=ICD_PATH+'icd10_ukbb.txt' else: hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt' if chapterFile==None: chapterFile = ICD_PATH+'ICD10_Chapters.txt' #quick reference,", "encoding. The default is None. secondaryEncoding : ICDUtilities, optional Second encoding. The default", "in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code", "open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f: self.EncodingCoversionTable = pickle.load(f) except FileNotFoundError: self.primaryEncoding = ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable()", "Returns set of codes that represent the mapping of the primary code to", "[x.code for x in code.child_codes] allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary", "Utilities with at least some shared codes by taking advantage of shared hierarchical", "associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code : str ICD10 code string. associated_string :", "parent. The default is None. Raises ------ ValueError If unable to add child", "and all children (if exist) from data structure. Parameters ---------- del_code : str", "given code. Parameters ---------- code : str ICD10 code string. 
Returns ------- ICDCode", "[] self.is_terminal=is_terminal self.parent_code = parent_code if parent_code is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self)", "need to construct data files that match the structure of the ICD10 files.", "relationship type in addition to code. The default is False. Returns ------- set", "intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base) return intVal def _findParentInList(self,code,parentList): while len(parentList) > 0: if parentList[-1] in code:", "is None. secondaryEncoding : ICDUtilities, optional Second encoding. The default is None. Returns", "self.ReturnCodeObject(del_code) parent_code = del_code_obj.parent_code del_code_list=[del_code] if parent_code is not None: parent_code.child_codes.remove(del_code_obj) if del_code_obj.is_terminal==False:", "translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check if there is 1:1", "f: line=self._convertToUnicode(line) line=line.strip('\\n').split('\\t') self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)] start,stop = line[1].split('-') chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])] chapter_list+=['Chapter_'+line[0]] self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount self.setOfUnusableCodes.add('Chapter_'+line[0]) currentUnusableCodeCount+=1 #now", "currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try: icd_chapter =", "Returns ------- set Set of codes aligned to the code of interest. \"\"\"", "It stores the codebook as a simple tree (stored as a list called", "is parent. The default is None. 
Raises ------ ValueError If unable to add", "with 2018 version of ICD10-CM and 2020 version of ICD10 from UK Biobank.", "import string import pickle import pandas as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class", "if intVal <= x[1])] except StopIteration: raise ValueError('{}'.format(parsedLine[1])) currentParentList +=[icd_chapter] if int(parsedLine[2])==1: self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))]", "class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None): \"\"\" Parameters ---------- code : str", "for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList)", "= {}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self): return self.code class ICDUtilities: def", "two text files: 1) ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed. 2)", "parent_code=None): \"\"\" Parameters ---------- code : str ICD10 code string. associated_string : str", "full code object (not just string) for a given code. Parameters ---------- code", "by specifying the path to another ICD10 file. 
ICD9 codebook could be used", "__str__(self): return self.associated_string def __repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\"))", "Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code for x in code.child_codes] allowed_child_codes", "Code']+=[code.code] translation_table['Secondary Code(s)']+=[set([parent.code])] translation_table['Relationship']+=['Parent'] else: if len(code.child_codes)>0: child_code_names = [x.code for x in", "def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]} for code in self.primaryEncoding.UsableICDCodes: #first check if there", "= [] for line in f: line=self._convertToUnicode(line) parsedLine=[] parsedLine+=[line[0:6].strip()] parsedLine+=[line[6:14].strip()] parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList", "as pd ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/') class ICDCode: def __init__(self, code, associated_string,is_terminal, parent_code=None):", "\"\"\" Removes the ICD code and all children (if exist) from data structure.", "{}\".format(code,parent_code.code)) def __str__(self): return self.associated_string def __repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString):", "ICD10 code string. associated_string : str String defining code in codebook. is_terminal :", "if unable to parse some line. Prints out the line of interest. Returns", "(unlikely) work with other encodings but has not been tested. The default is", "__repr__(self): return self.code class ICDUtilities: def _convertToUnicode(self,byteString): return unidecode(str(byteString,\"ISO-8859-1\")) def _lettersToInt(self,letter,base): list(string.ascii_uppercase).index(letter)*base def", "main groups of codes). 
Again, this may work with other encodings but has", "translation_table['Secondary Code(s)']+=[set([code.code])] translation_table['Relationship']+=['Direct'] else: parent = code.parent_code if parent.code in self.secondaryEncoding.setOfUsableCodes: translation_table['Primary Code']+=[code.code]", "Diagnostic code to be converted . includeRelationship : bool, optional. Specicies whether to", "self.primaryEncoding.UsableICDCodes: #first check if there is 1:1 mapping between codes if code.code in", "to parent code. \"\"\" all_child_codes = self.ReturnCodeObject(parent_code).child_codes terminal_code_list=[] for child in all_child_codes: if", "(not just string) for a given code. Parameters ---------- code : str ICD10", "for del_code in all_del_codes: if del_code in self.setOfUnusableCodes: self.UnusableICDCodes.pop(marker_list_unusable.index(del_code)) marker_list_unusable.remove(del_code) else: self.UsableICDCodes.pop(marker_list_usable.index(del_code)) marker_list_usable.remove(del_code)", "len(parentList) > 0: if parentList[-1] in code: return parentList else: parentList.pop() return parentList", "AssignCodeToChapter(self,code): \"\"\" Returns the chapter heading for input code Parameters ---------- code :", "the chapter heading for input code Parameters ---------- code : str ICD10 code.", "chapterFile : str, optional File path to an alternative code chapter file (ie", "parsedLine+=[line[14:16].strip()] parsedLine+=[line[77:].strip()] currentParentList = self._findParentInList(parsedLine[1],currentParentList) if len(currentParentList) == 0: intVal = self._convertCodeToIntVal(parsedLine[1][0:3]) try:", "= set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes) if len(allowed_child_codes)>0: translation_table['Primary Code']+=[code.code] translation_table['Secondary Code(s)']+=[allowed_child_codes] translation_table['Relationship']+=['Child'] translation_table=pd.DataFrame(translation_table) translation_table.set_index('Primary 
Code',drop=False,inplace=True) return", "ICDUtilities() self.secondaryEncoding=ICDUtilities(useICD10UKBB=True) self.EncodingCoversionTable=self._buildTranslationTable() self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth') else: self.EncodingCoversionTable=self._buildTranslationTable() def ReturnConversionSet(self,primaryCode,includeRelationship=False): \"\"\" Returns set of codes", "self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD code and all children (if exist)", "to search full tree for codes. #Full list of linked codes self.UsableICDCodes=[] self.usableCodeToIndexMap={}", "else: self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))] self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount self.setOfUnusableCodes.add(parsedLine[1]) currentUnusableCodeCount+=1 currentParentList+=[parsedLine[1]] class ICD10TranslationMap: def _buildTranslationTable(self): translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]}", "Parameters ---------- primaryEncoding : ICDUtilities, optional First encoding. The default is None. 
secondaryEncoding", "return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD code and all children (if", "marker_list_usable= [x.code for x in self.UsableICDCodes] marker_list_unusable = [x.code for x in self.UnusableICDCodes]", "not include periods (ie J101 not J10.1) Returns ------- terminal_code_list : list List", "return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]] else: return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]] def DeleteCode(self,del_code): \"\"\" Removes the ICD code and", ": str ICD10 code to delete Returns ------- None \"\"\" all_del_codes=self._deleteCode(del_code) marker_list_usable= [x.code", "flat two text files: 1) ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed.", "None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children to terminal node:", "is not None: if parent_code.is_terminal==False: parent_code.child_codes.append(self) else: raise ValueError(\"Attempting to add children to", "File path to an alternative code hierarchy file. This may (unlikely) work with", "periods (ie J101 not J10.1) Returns ------- terminal_code_list : list List of ICD10", "return parentList else: parentList.pop() return parentList def ReturnCodeObject(self,code): \"\"\" Returns full code object" ]
[ "custom from schemas import user as user_schemas from schemas.match import Match, FilterParams from", "create, custom from schemas import user as user_schemas from schemas.match import Match, FilterParams", "filter_params=filter_params, db=db) # create Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\")", "other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects", "user has already matched in the past 3 days if user_data.last_matched_time: three_days_after_match =", "= datetime.datetime.now() if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B", "= create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time for each user user_data.last_matched_time = datetime.datetime.now()", "detail='No matches made yet!') latest_match = max(user_data.previous_matches, key=attrgetter('matched_at')) return read.read_single_resource(model=user_model.User, identifier='id', value=latest_match.other_user_id, db=db)", "# create match objects in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match", "already matched within the past 3 days. 
Wait till {next_valid_date}\") # run matching", "%d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already matched within the past 3 days.", "other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in the database", "user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if user has already matched", "value=user_id, db=db) if user_data is None or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No", "schemas.match import Match, FilterParams from . import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet,", "database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id:", "FilterParams from . 
import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def", "user_data is None or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches made yet!')", "= APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data", "changes in the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200)", "import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params:", "session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams,", "@match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User,", "from fastapi import APIRouter, HTTPException from models import user as user_model, match from", "import user as user_schemas from schemas.match import Match, FilterParams from . 
import session_dep", "create Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id,", "+ datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month,", "if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time <", "HTTPException(status_code=403, detail=f\"You've already matched within the past 3 days. Wait till {next_valid_date}\") #", "raise HTTPException(status_code=403, detail=f\"You've already matched within the past 3 days. Wait till {next_valid_date}\")", "None or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches made yet!') latest_match =", "already matched in the past 3 days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time +", "= datetime.datetime.now() # commit all changes in the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data)", "{next_valid_date}\") # run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match", "other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data,", "APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data =", "other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time 
for each user user_data.last_matched_time =", "@match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id,", "three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time < three_days_after_match: next_valid_date", "fastapi import APIRouter, HTTPException from models import user as user_model, match from resources.crud", "from models import user as user_model, match from resources.crud import read, create, custom", "resources.crud import read, create, custom from schemas import user as user_schemas from schemas.match", "# create Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data =", "run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic models", "user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day,", "raise HTTPException(status_code=404, detail='No matches made yet!') latest_match = max(user_data.previous_matches, key=attrgetter('matched_at')) return read.read_single_resource(model=user_model.User, identifier='id',", "HTTPException from models import user as user_model, match from resources.crud import read, create,", "user_model, match from resources.crud import read, create, custom from schemas import user as", "def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is", "%B %Y') raise HTTPException(status_code=403, 
detail=f\"You've already matched within the past 3 days. Wait", "= Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in the database current_match", "till {next_valid_date}\") # run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create", "read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is None or user_data.previous_matches == []: raise", "is None or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches made yet!') latest_match", "async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db)", "as user_model, match from resources.crud import read, create, custom from schemas import user", "days. Wait till {next_valid_date}\") # run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db)", "< three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've", "the past 3 days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time =", "status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if", "db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data =", "get_latest_match(user_id: int, db=session_dep): user_data = 
read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is None", "db=db) # update last_matched_time for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now()", "attrgetter from fastapi import APIRouter, HTTPException from models import user as user_model, match", "models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\")", "matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic models current_match_data = Match(current_user_id=user_id,", "read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if user has already matched in the", "user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time < three_days_after_match:", "= read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if user has already matched in", "import user as user_model, match from resources.crud import read, create, custom from schemas", "# check if user has already matched in the past 3 days if", "all changes in the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet,", "match objects in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match,", "matched in the past 3 days if user_data.last_matched_time: three_days_after_match = 
user_data.last_matched_time + datetime.timedelta(days=3)", "as user_schemas from schemas.match import Match, FilterParams from . import session_dep match_router =", "= datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already matched within", "current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time", "other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in the database current_match = create.create_single_isolated_resource(model=match.Match,", "data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time for each user", "user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit all changes in the database", "db=db) # create Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data", "data=other_match_data, db=db) # update last_matched_time for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time =", "year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already matched within the past 3", "{user_data.last_name}\") # create match objects in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db)", "from resources.crud import read, create, custom from schemas import user as user_schemas from", "# commit all changes in the database 
db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user", "matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic models current_match_data", "has already matched in the past 3 days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time", "within the past 3 days. Wait till {next_valid_date}\") # run matching algorithm matched_user", "async def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data", ". import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int,", "filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if user", "models import user as user_model, match from resources.crud import read, create, custom from", "def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) #", "import Match, FilterParams from . 
import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201)", "create match objects in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match =", "the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def", "# run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic", "Match, FilterParams from . import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async", "match from resources.crud import read, create, custom from schemas import user as user_schemas", "for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit all changes", "the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) #", "== []: raise HTTPException(status_code=404, detail='No matches made yet!') latest_match = max(user_data.previous_matches, key=attrgetter('matched_at')) return", "identifier='id', value=user_id, db=db) if user_data is None or user_data.previous_matches == []: raise HTTPException(status_code=404,", "user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is None or user_data.previous_matches ==", "each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit all changes in", "datetime.datetime.now() matched_user.last_matched_time = 
datetime.datetime.now() # commit all changes in the database db.add(current_match) db.add(other_match)", "from . import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id:", "database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update", "or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches made yet!') latest_match = max(user_data.previous_matches,", "db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep):", "current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403,", "db=db) if user_data is None or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches", "3 days. 
Wait till {next_valid_date}\") # run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params,", "Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name}", "last_matched_time for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit all", "[]: raise HTTPException(status_code=404, detail='No matches made yet!') latest_match = max(user_data.previous_matches, key=attrgetter('matched_at')) return read.read_single_resource(model=user_model.User,", "Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id,", "if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise", "= read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is None or user_data.previous_matches == []:", "db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int,", "import read, create, custom from schemas import user as user_schemas from schemas.match import", "custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, 
other_user_name=f\"{matched_user.first_name}", "= user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time < three_days_after_match: next_valid_date =", "identifier='id', value=user_id, db=db) # check if user has already matched in the past", "algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic models current_match_data =", "matched_user.last_matched_time = datetime.datetime.now() # commit all changes in the database db.add(current_match) db.add(other_match) db.commit()", "if user_data is None or user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches made", "import APIRouter, HTTPException from models import user as user_model, match from resources.crud import", "in the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async", "days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time", "detail=f\"You've already matched within the past 3 days. 
Wait till {next_valid_date}\") # run", "other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in", "create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time for each", "db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data", "matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id',", "status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id,", "past 3 days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now()", "FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if user has", "HTTPException(status_code=404, detail='No matches made yet!') latest_match = max(user_data.previous_matches, key=attrgetter('matched_at')) return read.read_single_resource(model=user_model.User, identifier='id', value=latest_match.other_user_id,", "the past 3 days. 
Wait till {next_valid_date}\") # run matching algorithm matched_user =", "APIRouter, HTTPException from models import user as user_model, match from resources.crud import read,", "import datetime from operator import attrgetter from fastapi import APIRouter, HTTPException from models", "update last_matched_time for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit", "datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already matched within the", "user as user_schemas from schemas.match import Match, FilterParams from . import session_dep match_router", "db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if user has already", "Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match", "from schemas.match import Match, FilterParams from . 
import session_dep match_router = APIRouter() @match_router.post('/user/{user_id}/match/',", "int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check if", "current_time = datetime.datetime.now() if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d", "if user has already matched in the past 3 days if user_data.last_matched_time: three_days_after_match", "datetime from operator import attrgetter from fastapi import APIRouter, HTTPException from models import", "%Y') raise HTTPException(status_code=403, detail=f\"You've already matched within the past 3 days. Wait till", "Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in the database current_match =", "db=db) # check if user has already matched in the past 3 days", "response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db)", "user_schemas from schemas.match import Match, FilterParams from . 
import session_dep match_router = APIRouter()", "user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit all changes in the", "operator import attrgetter from fastapi import APIRouter, HTTPException from models import user as", "# update last_matched_time for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() #", "three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already", "add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) # check", "read, create, custom from schemas import user as user_schemas from schemas.match import Match,", "response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id',", "in the past 3 days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time", "db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time for each user user_data.last_matched_time", "int, db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is None or", "objects in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data,", "check if user has already matched in the past 3 days if user_data.last_matched_time:", 
"match_router = APIRouter() @match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201) async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep):", "= create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time for", "user_data.previous_matches == []: raise HTTPException(status_code=404, detail='No matches made yet!') latest_match = max(user_data.previous_matches, key=attrgetter('matched_at'))", "schemas import user as user_schemas from schemas.match import Match, FilterParams from . import", "next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already matched", "db=session_dep): user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db) if user_data is None or user_data.previous_matches", "datetime.timedelta(days=3) current_time = datetime.datetime.now() if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A", "from schemas import user as user_schemas from schemas.match import Match, FilterParams from .", "current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") #", "matched within the past 3 days. 
Wait till {next_valid_date}\") # run matching algorithm", "= custom.match_user(user_id=user_id, filter_params=filter_params, db=db) # create Match Pydantic models current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id,", "Wait till {next_valid_date}\") # run matching algorithm matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db) #", "import attrgetter from fastapi import APIRouter, HTTPException from models import user as user_model,", "month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y') raise HTTPException(status_code=403, detail=f\"You've already matched within the past", "user as user_model, match from resources.crud import read, create, custom from schemas import", "value=user_id, db=db) # check if user has already matched in the past 3", "from operator import attrgetter from fastapi import APIRouter, HTTPException from models import user", "datetime.datetime.now() if current_time < three_days_after_match: next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month, year=three_days_after_match.year).strftime('%A %d %B %Y')", "past 3 days. 
Wait till {next_valid_date}\") # run matching algorithm matched_user = custom.match_user(user_id=user_id,", "return matched_user @match_router.get(\"/user/{user_id}/match/latest/\", response_model=user_schemas.UserGet, status_code=200) async def get_latest_match(user_id: int, db=session_dep): user_data = read.read_single_resource(model=user_model.User,", "= datetime.datetime.now() matched_user.last_matched_time = datetime.datetime.now() # commit all changes in the database db.add(current_match)", "3 days if user_data.last_matched_time: three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3) current_time = datetime.datetime.now() if", "{matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create match objects in the", "in the database current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db) other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db)", "create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db) # update last_matched_time for each user user_data.last_matched_time = datetime.datetime.now() matched_user.last_matched_time", "= Match(current_user_id=user_id, other_user_id=matched_user.id, other_user_name=f\"{matched_user.first_name} {matched_user.last_name}\") other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id, other_user_name=f\"{user_data.first_name} {user_data.last_name}\") # create", "commit all changes in the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return matched_user @match_router.get(\"/user/{user_id}/match/latest/\",", "datetime.datetime.now() # commit all changes in the database db.add(current_match) db.add(other_match) db.commit() db.refresh(user_data) return" ]
[ "getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {} clones", "1 elif i == 0 and curgap != 0: if curgap % 3", "for this by looking at the next codon over in the #alignment if", "receptors. cloneseqs (list): list of masked clone sequences. logs (dict): contains log information", "files for IgPhyML output Arguments: out_dir (str): directory for sequence files. useqs_f (dict):", "from textwrap import dedent from time import time from Bio.Seq import Seq from", "if scodons[i][2:3] != qi[2:3] or scodons[i + 1] != qi[3:6]: qi = \"NN\"", "nci = ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj", "[30] * len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"])", "of columns to append to sequence ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML", "sequence alignment and partition files for IgPhyML output Arguments: clones (list): receptor objects", "pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"]", "whether a frameshift occured in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. oqpos", "and parameters (r) in IgPhyML. 
omega (str): omega optimization in IgPhyML (--omega) kappa", "cid = delim + \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\"", "none: leave all intermediate files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\",", "r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence", "clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons split by alignment to IMGT", "same among sequences in the same clone.\", \"Be sure to cluster sequences into", "= 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs", "txt) clean (str): delete intermediate files? (none, all) nohlp (bool): If True, only", "ros[(psite + 3 + ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite +", "\"\"\" Parses command line arguments and calls main \"\"\" # Parse command line", "= getDbFields(db_file, reader=reader) # open input file handle = open(db_file, \"r\") records =", "= 0 for ins in range(1, 3): ros = receptor.sequence_input ris = receptor.sequence_imgt", "ngermline != germline: resolveglines = True if resolveglines: printError(\"%s %s\" % (\"Predicted germlines", "if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1 for r", "oqpos, ospos, log, debug): \"\"\" Checks whether a frameshift occured in a sequence", "at that site and scan forward until you find a codon that matches", "= [] imgt = [] for j in range(0, nseqs): for i in", "sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\"", "debug): \"\"\" Checks whether a frameshift occured in a sequence Arguments: receptor (changeo.Receptor.Receptor):", "CDR/FWR partitioned model on 
this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels)", "ins) < len(ros) and (pisite + 3) < len(ris): #cut out 1 or", "if mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive) if not", "= os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader, writer,", "too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not collapse", "add --- gaps back to IMGT sequence ncon_seq = \"\" counter = 0", "+ \"_\" + r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer,", "imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\"", "positions for first sequence in clones, the germline sequence of the first receptor", "within a clone. \"\"\" keys = list(useqs.keys()) join = {} # id ->", "len(useqs_f) < min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small:", "4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\"", "si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi + si[i] if", "branch lengths (l) and parameters (r) in IgPhyML. omega (str): omega optimization in", "fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\",", "Arguments: out_dir (str): directory for sequence files. 
useqs_f (dict): unique sequences mapped to", "three curgap = 0 for i in ndotgaps: if i == 1: curgap", "\"\"\" Remove CDR3 from all sequences and germline of a clone Arguments: sequences", "records: if r.functional is None: r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL", "fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on outputted data? if igphyml: runIgPhyML(pass_handle.name,", "range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from metadata md =", "range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi +", "printError(\"GY94 tree building in IgPhyML failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML HLP", "clones. clones (list): list of Receptor objects. \"\"\" for i in range(0,len(sequences)): imgtar", "if target_clones is None or r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r)", "(clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False,", "< min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \"", "1 elif scodons[spos] == qcodons[qpos]: # if both are the same, move both", "i in gaps: #print(str(i) + \":\" + ncon_seq) if i == 1: ncon_seq", "== 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep +", "+ regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels =", "fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is", "OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if 
debug: print(receptor.sequence_id) # adjust starting position", "= OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log) try: #check for igphyml", "if dist == 0 and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\")", "# add --- gaps back to IMGT sequence ncon_seq = \"\" counter =", "(str): List of clone IDs to analyze. collapse (bool): if True collapse identical", "output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float,", "is not None: meta_data_ar = meta_data[0].split(\",\") for c in clones: if meta_data is", "for first sequence in clones, the germline sequence of the first receptor in", "= hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log = OrderedDict() log[\"ID\"]", "clones, cloneseqs, logs, fails, out_args, fail_writer, mask = not nmask) if total ==", "input sequence s_end (int): end of IMGT sequence qpos (int): starting position of", "log, debug, recursive=False): \"\"\" Find and mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor", "r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE", "imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq) if ptcs", "\"-\" and seq1[i] != \".\": if seq2[i] != \"N\" and seq2[i] != \"-\"", "part of a recursive call? 
mask (bool) : mask split codons for use", "non-match, at which point we\"ll just want to mask the #first codon in", "sequences[j] = sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is", "type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability", "intermediate files? (none, all) nohlp (bool): If True, only estimate GY94 trees and", "nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \"", "# adjust starting position of query sequence qi = qi[(receptor.v_seq_start - 1):] #tally", "sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" %", "= igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean !=", "- len(germline) germline = germline + \"N\" * (seqdiff) if sites % 3", "TSV files into IgPhyML input files \"\"\" # Info __author__ = \"<NAME>\" from", "str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization,", "log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"]", "only estimate GY94 trees and parameters clean (str): delete intermediate files? 
(none, all)", "# Define input and output field help message fields = dedent( \"\"\" output", "spos = ospos printDebug(\"But codon was apparently preserved\", debug) if \"IN-FRAME\" in log:", "in gaps: #print(str(i) + \":\" + ncon_seq) if i == 1: ncon_seq =", "dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data sequences. Any clones with fewer than", "motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted data Arguments:", "__version__, __date__ # Imports import os import random import subprocess import multiprocessing as", "(str): input and output format. out_args (dict): arguments for output preferences. Returns: dict:", "len(ris): #cut out 1 or 2 nucleotides downstream of offending codon receptor.sequence_input =", "default=-1, help=\"\"\"Depth of reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\",", "all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1 for", "len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if correctseqs: maxlen", "str(spos) concatenated_seq = Seq(\"\") for i in scodons: concatenated_seq += i # add", "else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] =", "gap only sites from observed data newgerm = [] imgt = [] for", "= log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"]", "log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on outputted data? 
if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out,", "partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones,", "indels start_time = time() printMessage(\"Correcting frames and indels of sequences\", start_time=start_time, width=50) #subsampling", "and remove them for now gaps = [] ndotgaps = [] nsi =", "parameters to estimate: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\",", "and partition files for IgPhyML output Arguments: out_dir (str): directory for sequence files.", "group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data sequences. Any clones with", "mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i + 3] for i", "[\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args =", "else: fails[\"other_fail\"] += 1 else: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] =", "time import time from Bio.Seq import Seq from functools import partial # Presto", "\"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in range(0, len(sequences)): i", "fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\"))", "= os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name, __ = splitName(db_file) else: clone_name", "\"You'll have to do that yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\"", "2 nucleotides downstream of offending codon receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite", "fails[\"minseq_fail\"] 
log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"]", "dest=\"append\", help=\"\"\"List of columns to append to sequence ID to ensure uniqueness.\"\"\") igphyml_group", "(-t) motifs (str): motifs to use in IgPhyML (--motifs) hotness (str): motif in", "\"NNN\" if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos) else:", "is value in useqs dict). log (collections.OrderedDict): log of sequence errors. meta_data (str):", "dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\",", "if this was due to a frame-shift by repeating this method but with", "give misleading dupcount information if some sequences have ambiguous characters at polymorphic sites", "\\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt !=", "len(imgtar): printError(\"IMGT assignments are not the same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False)", "if fbreak: break return dist def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses", "optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks", "receptor.sequence_imgt psite = receptor.v_seq_start - 1 + oqpos*3 pisite = ospos * 3", "duplicate sequence if only one in a clone. 
imgt (list) : IMGT numbering", "action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\",", ") # print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)): if imgtar[j] != 108:", "#print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline) < len(sequences[i]): #", "\"\"\" Characterize potential mismatches between IMGT labels within a clone Arguments: sequences (list):", "= True if resolveglines: printError(\"%s %s\" % (\"Predicted germlines are not the same", "if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog =", "= open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in", "\" + str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\"", "which point we\"ll just want to mask the #first codon in the IMGT", "not None: output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle is not None: output[\"fail\"] =", "[60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\",", "if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING", "else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for i in scodons: concatenated_seq +=", "printLog(log) if clean != \"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] =", "to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to 
estimate: e", "0 if r.functional and ptcs < 0: #If IMGT regions are provided, record", ": position of interest in input sequence. ospos (int) : position of interest", "files for each clone. lineages successfully processed records. lineages-fail database records failed processing.", "e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str,", "bootstrap these data if desired lg = len(newgerm) sites = range(0, lg) transtable", "#estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>',", "clone.\", \"Be sure to cluster sequences into clones first and then predict germlines", "in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication", "mask = not nmask) if total == sample_depth: break # Start processing clones", "#sometimes IMGT will just cut off first letter if non-match, at which point", "newgerm, conseqs, duplicate, imgt) if collapse: return len(useqs_f) else: return nseqs def maskCodonsLoop(r,", "range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are not the same within clone", "ambigchar[useqs[kj]] = ncountj # this algorithm depends on the fact that all sequences", "= c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline != germline:", "receptor (changeo.Receptor.Receptor): Receptor object. 
scodons (list): list of codons in IMGT sequence qcodons", "= \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\",", "oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons split by alignment to", "\"failreads\":0} # Mask codons split by indels start_time = time() printMessage(\"Correcting frames and", "optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines", "an error occurs or masking fails. 1: returns 1 masking succeeds \"\"\" if", "recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts +=", "printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" +", "qi spos = i break else: spos = i break return qi, spos", "OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log) try: #check for igphyml executable", "frameshifts = 0 for ins in range(1, 3): ros = receptor.sequence_input ris =", "codons split by alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object for", "debug: print(\"checking %s at position %d %d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos", "break else: receptor.sequence_input = ros receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor, scodons,", "IgPhyML (-t) motifs (str): motifs to use in IgPhyML (--motifs) hotness (str): motif", "\"\"\" frameshifts = 0 while spos < s_end and qpos < len(qcodons): if", "\"\" for i in range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0)", "for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try: #get GY94 
starting", "= [qi[i:i + 3] for i in range(0, len(qi), 3)] frameshifts = 0", "+ \"NN\" if ncdr3: ngerm = [] nimgt = [] for i in", "delete intermediate files? (none, all) nohlp (bool): If True, only estimate GY94 trees", "< len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] =", "% (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and duplicate: if meta_data is", "vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean", "st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not the", "fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count -", "list of receptors. cloneseqs (list): list of masked clone sequences. logs (dict): contains", "group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\",", "partition files for IgPhyML output Arguments: clones (list): receptor objects within the same", "False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if", "differences. \"\"\" if len(seq1) != len(seq2): printError(\"Sequences are not the same length! %s", "metadata from ID. newgerm (str) : modified germline of clonal lineage. 
conseqs (list)", "help=\"\"\"Mutability parameters to estimate: e = estimate, ce = estimate + confidence interval\"\"\")", "open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes,", "c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline != germline: resolveglines", "ACGT differences. \"\"\" if len(seq1) != len(seq2): printError(\"Sequences are not the same length!", "try: reader, writer, __ = getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\" % format)", "printError(\"Sequences are not the same length! %s %s\" % (seq1, seq2)) dist =", "0 : printDebug(\"Frame-shifting gap detected! Refusing to include sequence.\", debug) log[\"PASS\"] = False", "\"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? none: leave all intermediate files; all:", "qi spos = i break elif scodons[i][0] == \".\": scodons[i] = \"N\" +", "= True if meta_data is not None: matches = 0 for m in", "= md.replace(\")\",\"-\") #remove parenthesis from metadata md = md.replace(\"(\",\"-\") #remove parenthesis from metadata", "len(qcodons): if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\" and", "hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] =", "meta_data (str): Field to append to sequence IDs. Splits identical sequences with different", "sequences writer object. min_seq (int): minimum number of data sequences to include. Returns:", "debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos) else:", "#!/usr/bin/env python3 \"\"\" Converts TSV files into IgPhyML input files \"\"\" # Info", "append to sequence IDs. 
Splits identical sequences with different meta_data collapse (bool): if", "IgPhyML (--motifs) hotness (str): motif in IgPhyML (--hotness) oformat (str): output format for", "ros receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos,", "IgPhyML output Arguments: clones (list): receptor objects within the same clone. sequences (list):", "line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog", "if True remove all CDR3s. nmask (bool): if False, do not attempt to", "this by looking at the next codon over in the #alignment if scodons[i][2:3]", "receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if", "interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\",", "and spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if no match", "tallies = [] for i in range(0, sites, 3): tally = 0 for", "looking at the next codon over in the #alignment if scodons[i][2:3] != qi[2:3]", "next site if debug: print(\"checking %s at position %d %d\" % (scodons[spos], spos,", "(bool): If True, only estimate GY94 trees and parameters format (str): input and", "meta_data_list = [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim +", "j)) #Resolve germline if there are differences, e.g. if reconstruction was done before", "trees and parameters format (str): input and output format. 
out_args (dict): arguments for", "log=True, format=True) # Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False)", "same, mask IMGT at that site and scan forward until you find a", "if r.functional is None: r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL column", "+ \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt", "spos += 1 while qpos < len(qcodons) and spos < s_end and scodons[spos]", "#Run IgPhyML on outputted data? if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega,", "position of interest in IMGT sequence. log (dict) : log of information for", "within a clone (index is value in useqs dict). log (collections.OrderedDict): log of", "clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if", "(tab or txt) nohlp (bool): If True, only estimate GY94 trees and parameters", "partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0]))", ": delimiter for extracting metadata from ID. newgerm (str) : modified germline of", "in input sequence. debug (bool) : print debugging statements. 
Returns: tuple: (modified input", "= ngerm imgt = nimgt #print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict() conseqs", "metadata r.setField(meta_data[m],md) if append is not None: if append is not None: for", "None: meta_data_list = [] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\"))", "\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in range(0,", "Arguments: clones (list): receptor objects within the same clone. sequences (list): sequences within", "j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key", "unique sequences mapped to ids. meta_data (str): Field to append to sequence IDs.", "within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve", "fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f", "and parameters clean (str): delete intermediate files? (none, all) \"\"\" osplit = outfile.split(\".\")", "id -> useq to join with (least ambiguous chars) ambigchar = {} #sequence", "else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If", "args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for each input file for", "= \"IgPhyML GY94 tree estimation\" printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except:", "= \"\" if meta_data is not None: seq, cid = seq_f.split(delim) cid =", "debug (bool): print debugging statements? recursive (bool): was this function called recursively? 
\"\"\"", "\"BuildTrees\" printLog(log) #Run IgPhyML on outputted data? if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc,", "print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)):", "clone_name, __ = splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name is None: clone_dir", "seq1[i] != \"-\" and seq1[i] != \".\": if seq2[i] != \"N\" and seq2[i]", "+ 3)] + ros[(psite + 3 + ins):] receptor.sequence_imgt = ris[0:(pisite + 3)]", "ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else: ncj = 0 if useqs[kj]", "(least ambiguous chars) ambigchar = {} #sequence id -> number ATCG nucleotides for", "Run IgPhyML on outputted data Arguments: outfile (str): Output file name. igphymlout (str):", "+ regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) <", "i break else: spos = i break return qi, spos def checkFrameShifts(receptor, oqpos,", "[45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\ [120]", "concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance between", "= False qi = receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone", "= [] # remove gap only sites from observed data newgerm = []", "files \"\"\" # Info __author__ = \"<NAME>\" from changeo import __version__, __date__ #", "clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline", "sequences have ambiguous characters at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False,", "writer, __ = getFormatOperators(format) except ValueError: 
printError(\"Invalid format %s.\" % format) out_fields =", "= line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close()", "all sequences are compared pairwise, and all are zero # distance from the", "OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep) for line", "in useqs_f.items(): seq = seq_f cid = \"\" if meta_data is not None:", "statements? recursive (bool): was this function called recursively? \"\"\" frameshifts = 0 while", "return useqs def hasPTC(sequence): \"\"\" Determines whether a PTC exits in a sequence", "0 elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is not None: simgt =", "clones, the germline sequence of the first receptor in clones, the length of", "1 elif qcodons[qpos] == \"N\": # possible that SEQ-IMGT ends on a bunch", "ncj = 0 if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if ncj <", "imgtar[j] != 108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 +=", "partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" %", "log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\")", "> 0: random.shuffle(big_enough) total = 0 for r in big_enough: if r.functional is", "= r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from", "(bool): break after first difference found? Returns: int: number of ACGT differences. 
\"\"\"", "characters at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None,", "= r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from", "!= imgtar[j]: printError(\"IMGT assignments are not the same within clone %d!\\n\" % c.clone,", "original codon, it was preserved qpos -= 1 spos = ospos printDebug(\"But codon", "hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log) try:", "before deduplication min_seq (int): minimum number of sequences per clone append (str): column", "id to join with (least ambiguous chars) joinseqs = {} # id ->", "Ns qpos += 1 spos += 1 else: # if not the same,", "None: meta_data_list = [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim", "a clone Arguments: sequences (list): list of sequences in clones. clones (list): list", "output format for IgPhyML (tab or txt) nohlp (bool): If True, only estimate", "action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't", "sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to", "fbreak=False): \"\"\" Calculate the distance between two sequences counting only A,T,C,Gs Arguments: seq1", "#if debug: receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT of", "3)] # deal with the fact that it's possible to start mid-codon qi,spos", "index in Receptor list. 
receptors (dict): receptors within a clone (index is value", "\"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\",", "Calculate the distance between two sequences counting only A,T,C,Gs Arguments: seq1 (str): sequence", "list of codons in input sequence spos (int): starting position of IMGT sequence", "line arguments parser = getArgParser() checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args) del", "use when appending meta_data. Returns: list: deduplicated receptors within a clone. \"\"\" keys", "maximgt = len(imgtar) sites = maxlen for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\")", "fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo", "adjust starting position of query sequence qi = qi[(receptor.v_seq_start - 1):] #tally where", "frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos >= s_end or qcodons[qpos]", "fails[\"rec_count\"] - pass_count # End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle =", "+= 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if", "\"\"\" Create intermediate sequence alignment and partition files for IgPhyML output Arguments: clones", "seq1[i] != \".\": if seq2[i] != \"N\" and seq2[i] != \"-\" and seq2[i]", "in range(0, sites, 3): tally = 0 for j in range(0, nseqs): if", "from ID. newgerm (str) : modified germline of clonal lineage. conseqs (list) :", "split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
scodons (list): list of codons in", "printLog(log) # Open output files out_label = \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"],", "seq2[i]: dist += 1 if fbreak: break return dist def deduplicate(useqs, receptors, log=None,", "+ str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons) and spos <", "presto.IO import printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults import default_format from changeo.IO", "scodons[ospos]: #if codon in previous position is equal to original codon, it was", "\"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output", "s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug)", "for i in range(0, len(qi), 3)] frameshifts = 0 s_end = 0 #adjust", "\"passreads\":0, \"failreads\":0} # Mask codons split by indels start_time = time() printMessage(\"Correcting frames", "for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(cloneseqs[str(k)]))", "#print(str(i) + \":\" + ncon_seq) if i == 1: ncon_seq = ncon_seq +", "j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are not the same", "imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = [] ngermline", "= r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"]", "== rj.getField(m) and m != \"DUPCOUNT\": matches += 1 m_match = (matches ==", "not None: log_handle = open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\")", "not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3: if", "of 
clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum", "of data sequences to include. Returns: int: number of clones. \"\"\" s =", "\"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as clonef: if", "else: clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try:", "(none, all) nohlp (bool): If True, only estimate GY94 trees and parameters format", "IMGT gap, move forward in imgt spos += 1 elif scodons[spos] == qcodons[qpos]:", "meta_data is not None: meta_data_list = [] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]),", "format=True) # Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group", "\"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in range(0, len(sequence), 3): if sequence[i:(i+3)] in", "+ confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to", "log information for each sequence. fails (dict): counts of various sequence processing failures.", "help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\",", "(int): starting position of input sequence in IMGT sequence log (dict): log of", "output pass and fail files. 
\"\"\" # Print parameter info log = OrderedDict()", "= \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region by IMGT definition.", "file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d", "= OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug: print(receptor.sequence_id) # adjust starting", "Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser parser =", "help=\"\"\"If specified, collapse identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If", "is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] +", "% newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with", "\"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region by IMGT definition. Arguments:", "is not None: seq, cid = seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\")", "couldn't find upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] ==", "= open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]]", "* int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log", "concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt)", "on outputted data Arguments: outfile (str): Output file name. 
igphymlout (str): igphyml output", "\"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons split by indels start_time = time()", "%d, at end of subject sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\" in", "rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match = True if meta_data", "than the specified number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int,", "open input file handle = open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer =", "dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified,", "del useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines whether a PTC exits in", ">= len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if", "position of IMGT sequence in input sequence). \"\"\" spos = 0 for i", "parameters (r) in IgPhyML. 
omega (str): omega optimization in IgPhyML (--omega) kappa (str):", "\"__main__\": \"\"\" Parses command line arguments and calls main \"\"\" # Parse command", "# bootstrap these data if desired lg = len(newgerm) sites = range(0, lg)", "specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total = 0 for r in", "sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from", "\"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat txt will delete all tree", "% (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data is not None: for", "data if desired lg = len(newgerm) sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\"", "length %d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input", "log is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k]", "of receptor objects. collapse (bool) : deduplicate sequences. 
nseqs (int): number of sequences.", "spos += 1 elif scodons[spos] == qcodons[qpos]: # if both are the same,", "\"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] =", "!= \"N\" and seq1[i] != \"-\" and seq1[i] != \".\": if seq2[i] !=", "= sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not", "subprocess import multiprocessing as mp from argparse import ArgumentParser from collections import OrderedDict", "= \"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]: qi", "= 0 for j in range(0, nseqs): if sequences[j][i:(i + 3)] != \"...\":", "is equal to original codon, it was preserved qpos -= 1 spos =", "meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\"", "frame. This attempts to correct for this by looking at the next codon", "log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region by IMGT", "curgap = 0 for i in ndotgaps: if i == 1: curgap +=", "else: log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons) and spos < s_end: printDebug(\"FAILING", "FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1 else: log = OrderedDict()", "os.path.basename(pass_handle.name) if pass_handle is not None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] =", "Returns: int: number of clones. 
\"\"\" s = \"\" delim = \"_\" duplicate", "+ \"N\" * (seqdiff) if sites % 3 != 0: printError(\"number of sites", "clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None if out_args[\"log_file\"] is not", "if germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in", "else: ncj = 0 if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if ncj", "parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields,", "if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if correctseqs:", "in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if", "= clean printLog(log) todelete = open(outrep) for line in todelete: line = line.rstrip(\"\\n\")", "uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\",", "negative if clonesizes[str(k)] > 0: nclones += 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"]", "maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last =", "if found. 
\"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\")", "regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if", "!= len(r.sequence_imgt) or simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"]", "else: gaps.append(0) nsi = nsi + si[i] if si[i] != \".\": ndotgaps.append(0) #find", "with \" + rto.sequence_id del useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines whether", "log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle", "\"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR respectively: e = estimate, ce", "by alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object for a particular", "ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\")", "input tab-delimited database file. meta_data (str): Field to append to sequence IDs. 
Splits", "clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in range(0, len(sequences)): i = sequences[seqi] if", "nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt):", "field help message fields = dedent( \"\"\" output files: <folder> folder containing fasta", "108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar)", "regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"])", "join: nci = ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] =", "scodons[-1] == \".\": scodons[-1] = \"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\" in", "i in range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i] != \"-\" and seq1[i]", "identical sequences with different meta_data target_clones (str): List of clone IDs to analyze.", "or masking fails. 1: returns 1 masking succeeds \"\"\" if r.clone is None:", "delim) if collapse and len(useqs_f) < min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"]", "if curgap % 3 != 0 : printDebug(\"Frame-shifting gap detected! Refusing to include", "sequence columns not detected.\\n! Cannot run CDR/FWR partitioned model on this data.\\n\" imgtpartlabels", "(bool): print debugging statements? recursive (bool): was this function called recursively? \"\"\" frameshifts", "fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"]", "codons Arguments: scodons (list): list of codons in IMGT sequence. 
qi (str) :", "+= i # add --- gaps back to IMGT sequence ncon_seq = \"\"", "IgPhyML input files \"\"\" # Info __author__ = \"<NAME>\" from changeo import __version__,", "\"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]: qi =", "in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone", ": log of information for each sequence. debug (bool) : print debugging statements.", "\"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons", "sequence. log (dict) : log of information for each sequence. debug (bool) :", "range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in", "= r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] =", "only A,T,C,Gs Arguments: seq1 (str): sequence 1 seq2 (str): sequence 2 fbreak (bool):", "open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer = None, None if out_args[\"failed\"]: fail_handle", "= False for r in records: if r.functional is None: r.functional = True", "1 and duplicate: if meta_data is not None: if meta_data[0] == \"DUPCOUNT\": cid", "frameshift occured in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. oqpos (int) :", "nseqs == 1 and duplicate: if meta_data is not None: if meta_data[0] ==", "dict). log (collections.OrderedDict): log of sequence errors. 
meta_data (str): Field to append to", "export datasets until sequences are clustered into clones.\") if r.dupcount is None: r.dupcount", "0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k in clones.keys(): if len(clones[str(k)]) <", "DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1", "#if codon in previous position is equal to original codon, it was preserved", "IMGT gapped sequence. log: dict of sequence information \"\"\" debug = False qi", "alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object for a particular sequence.", "are the same, move both forward spos += 1 qpos += 1 elif", "= hotness log[\"NPROC\"] = nproc printLog(log) if not nohlp: try: #estimate HLP parameters/trees", "= range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" %", "to join with (least ambiguous chars) ambigchar = {} #sequence id -> number", "= maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt = ris", "0 if an error occurs or masking fails. 
1: returns 1 masking succeeds", "log (dict): log of information for each sequence debug (bool): print debugging statements?", "+= clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True", "(str): sequence 1 seq2 (str): sequence 2 fbreak (bool): break after first difference", "estimates and lineage trees from running IgPhyML, if specified required fields: sequence_id, sequence,", "debug) printDebug(\"RUNNING %d\" % ins, debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if", "pass_handle is not None: output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle is not None:", "name to append to sequence_id igphyml (bool): If True, run IgPhyML on outputted", "for j in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\"))", "r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input", "in IgPhyML (--omega) kappa (str): kappa optimization in IgPhyML (-t) motifs (str): motifs", "= delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\",", "id -> sequence id to join with (least ambiguous chars) joinseqs = {}", "clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone", "j in range(0, nseqs): if sequences[j][i:(i + 3)] != \"...\": tally += 1", "+ kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj # this algorithm depends on", "if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run 
IgPhyML", "imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm,", "ncdr3 = 0 for j in range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j])", "0 if mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive) if", "(str): column name to append to sequence_id igphyml (bool): If True, run IgPhyML", "changeo imports from presto.Defaults import default_out_args from presto.IO import printLog, printMessage, printWarning, printError,", "else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count #", "if specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id,", "nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\":", "meta_data, delim) if collapse and len(useqs_f) < min_seq: for seq_f, num in useqs_f.items():", "return -1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all sequences and germline", "[45] * len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"])", "+ ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] =", "qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1] == scodons[ospos]: #if codon in previous", "logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0,", "+ qi spos = i break else: spos = i break return qi,", "!= len(seq2): printError(\"Sequences are not the same length! %s %s\" % (seq1, seq2))", "fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 else: #imgt_warn = \"\\n! 
IMGT", "Create intermediate sequence alignment and partition files for IgPhyML output Arguments: out_dir (str):", "starting position of IMGT sequence in input sequence. debug (bool) : print debugging", "\"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\"", "to sequence ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h", "s_end, qpos, log, debug, recursive) if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] =", "r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time)", "clone (share indexes with clones parameter). meta_data (str): Field to append to sequence", "= nproc printLog(log) if not nohlp: try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args)", "\"\"\" # Define input and output field help message fields = dedent( \"\"\"", "command line arguments parser = getArgParser() checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args)", "= joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id del", "with different meta_data. clones (list) : list of receptor objects. collapse (bool) :", "with igphyml? Returns: str: modified IMGT gapped sequence. log: dict of sequence information", "outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create", "dictionary of output pass and fail files. 
\"\"\" # Print parameter info log", "of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes = {} big_enough = [] all_records", "from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id =", "ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] = nseq newgerm = ngerm imgt", "split by alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object for a", "format %s.\" % format) out_fields = getDbFields(db_file, reader=reader) # open input file handle", "i break return qi, spos def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks", "log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"]", "width=50) log_handle = None if out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"], \"w\")", "seq2[i] != \".\": if seq1[i] != seq2[i]: dist += 1 if fbreak: break", "resolveglines = True if resolveglines: printError(\"%s %s\" % (\"Predicted germlines are not the", "collapse (bool) : deduplicate sequences. nseqs (int): number of sequences. delim (str) :", "dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? 
none: leave all intermediate files; all: delete", "clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones,", "for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f in useqs_f and", "= ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\")", "\"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log", "1 sequence? imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies = []", "\"\"\" for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq =", "= len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites", "str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id))", "nimgt #print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict() conseqs = [] for j", "\"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete =", "= receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\"", "delete all tree file results.\\n\" \"You'll have to do that yourself.\") log =", "mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt =", "ospos printDebug(\"But codon was apparently preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] =", "IMGT sequence. log (dict) : log of information for each sequence. 
debug (bool)", "and seq2[i] != \"-\" and seq2[i] != \".\": if seq1[i] != seq2[i]: dist", "!= len(imgtar): correctseqs = True if correctseqs: maxlen = sites maximgt = len(imgtar)", "\" + str(ncdr3)) useqs_f = OrderedDict() conseqs = [] for j in range(0,", "qcodons[qpos] == \"N\": # possible that SEQ-IMGT ends on a bunch of Ns", "keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match", "min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" +", "confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate:", "called recursively? \"\"\" frameshifts = 0 while spos < s_end and qpos <", "log else: curgap = 0 si = nsi scodons = [si[i:i + 3]", "1 fails[\"germlineptc\"] += 1 return 0 if r.functional and ptcs < 0: #If", "= \"N\" + qi spos = i break else: spos = i break", "Presto and changeo imports from presto.Defaults import default_out_args from presto.IO import printLog, printMessage,", "meta_data is not None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile,", "i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile =", "action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t) branch lengths (l)", "identical sequences with different meta_data. 
meta_data (str): Field to append to sequence IDs.", "igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\",", "can give misleading dupcount information if some sequences have ambiguous characters at polymorphic", "append to sequence IDs. Splits identical sequences with different meta_data. Returns: tuple: tuple", "help=\"\"\"Optimize combination of topology (t) branch lengths (l) and parameters (r), or nothing", "= None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer", "maximgt = len(imgtar) for j in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen =", "in range(0, sites, 3): if i == 0: newseqs.append([]) if tallies[i//3] > 0:", "distance from the sequence they will be collapse to. if ncountj > ncounti:", "default_format from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition", "if collapse and len(useqs_f) < min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] =", "commas from metadata md = md.replace(\":\",\"-\") #remove colons from metadata md = md.replace(\",\",\"-\")", "+= 1 elif i == 0 and curgap != 0: if curgap %", "+= 1 fails[\"nf_fail\"] += 1 return 0 # Run IgPhyML on outputed data", "input files for IgPhyML Arguments: db_file (str): input tab-delimited database file. 
meta_data (str):", "#remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID", "not nmask) if total == sample_depth: break # Start processing clones clonesizes =", "clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader,", "False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True all_records.append(r) if r.clone in init_clone_sizes:", "\"\"\" output files: <folder> folder containing fasta and partition files for each clone.", "#figure out if this was due to a frame-shift by repeating this method", "< len(ris): #cut out 1 or 2 nucleotides downstream of offending codon receptor.sequence_input", "output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle is not None: output[\"fail\"] = fail_handle.name fail_handle.close()", "FWR,CDR respectively: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\",", "sequence Arguments: sequence (str): IMGT gapped sequence in frame 1. Returns: int: negative", "(bool) : print debugging statements. Returns: tuple: (modified input sequence, modified starting position", "sequence. 
log: dict of sequence information \"\"\" debug = False qi = receptor.sequence_input", "reader=reader) # open input file handle = open(db_file, \"r\") records = reader(handle) fail_handle,", "for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = []", "clones: if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st =", "fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"]", "= conseq if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] =", "from observed data newgerm = [] imgt = [] for j in range(0,", "qi, spos def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks whether a frameshift", "arguments parser = getArgParser() checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"]", "None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer =", "start_time=start_time, width=50) for k in clones.keys(): if len(clones[str(k)]) < min_seq: for j in", "(list): list of codons in input sequence spos (int): starting position of IMGT", ": Number of threads to parallelize IgPhyML across optimization (str): Optimize combination of", "file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to output,", "None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if", "elif scodons[spos] == qcodons[qpos]: # if both are the same, move both 
forward", "split by alignment to IMGT reference, then produces input files for IgPhyML Arguments:", "(bool) : deduplicate sequences. nseqs (int): number of sequences. delim (str) : delimiter", "log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug: print(receptor.sequence_id) # adjust starting position of", "omega (str): omega optimization in IgPhyML (--omega) kappa (str): kappa optimization in IgPhyML", "object. Returns: 0: returns 0 if an error occurs or masking fails. 1:", "[] for j in range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])", "checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos >= s_end or qcodons[qpos] != scodons[spos]:", "be corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are", "= fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] =", "clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number", "__, clone_name, __ = splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name is None:", "fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return 0 # Run IgPhyML on", "with (least ambiguous chars) joinseqs = {} # id -> useq to join", "if seq1[i] != \"N\" and seq1[i] != \"-\" and seq1[i] != \".\": if", "move both forward spos += 1 qpos += 1 elif qcodons[qpos] == \"N\":", "in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean != \"none\": log = OrderedDict()", "regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) +", "len(qcodons) and spos < s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+", "= regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] 
+ regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq =", "!= qi[3:6]: qi = \"N\" + qi spos = i break else: spos", "\"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get,", "\\ [60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq))", "IMGT regions are provided, record their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions =", "predict germlines using --cloned\")) if sites > (len(germline)): seqdiff = sites - len(germline)", "= md.replace(\":\",\"-\") #remove colons from metadata md = md.replace(\",\",\"-\") #remove commas from metadata", "two sequences counting only A,T,C,Gs Arguments: seq1 (str): sequence 1 seq2 (str): sequence", "ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt", "s_end, qpos, log, debug, recursive=False): \"\"\" Find and mask split codons Arguments: receptor", "arguments for output preferences. Returns: dict: dictionary of output pass and fail files.", "if resolveglines: printError(\"%s %s\" % (\"Predicted germlines are not the same among sequences", "be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to be subsampled", "clone. imgt (list) : IMGT numbering of clonal positions . 
\"\"\" # bootstrap", "fail_writer, mask = not nmask) if total == sample_depth: break # Start processing", "printMessage(\"Processing clones\", start_time=start_time, width=50) for k in clones.keys(): if len(clones[str(k)]) < min_seq: for", "rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"] =", "debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos] !=", "subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building in IgPhyML", "partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None,", "for i in range(0, len(newseqs)): nseq = [] ncdr3 = 0 for j", "meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\"", "igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1):", "(collections.OrderedDict): log of sequence errors. 
meta_data (str): Field to append to sequence IDs.", "and len(scodons[i]) == 3 and scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\" %", "ros[0:(psite + 3)] + ros[(psite + 3 + ins):] receptor.sequence_imgt = ris[0:(pisite +", "r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons", "\"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\",", "= {} # id -> sequence id to join with (least ambiguous chars)", "and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj =", "\"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] > 0:", "only sites from observed data newgerm = [] imgt = [] for j", "IMGT sequence ncon_seq = \"\" counter = 0 for i in gaps: #print(str(i)", "fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {} clones = {} logs = OrderedDict()", "== \".\": scodons[-1] = \"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\" in log:", "gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log = OrderedDict() log[\"ID\"] = r.sequence_id", "found :-/\") try: #get GY94 starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as", "list. receptors (dict): receptors within a clone (index is value in useqs dict).", "nseqs): if sequences[j][i:(i + 3)] != \"...\": tally += 1 tallies.append(tally) newseqs =", "\"-\"))) if nseqs == 1 and duplicate: if meta_data is not None: if", "= parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for each input file for f", "different meta_data. delim (str): delimited to use when appending meta_data. 
Returns: list: deduplicated", "commas from metadata md = md.replace(\")\",\"-\") #remove parenthesis from metadata md = md.replace(\"(\",\"-\")", ": starting position of IMGT sequence in input sequence. debug (bool) : print", "[r] cloneseqs[r.clone] = [mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1", "through list of joined sequences and collapse keys = list(useqs.keys()) for k in", "sequence? imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for", "+ regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] *", "conseq_f = conseq if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"]", "delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of", "__name__ == \"__main__\": \"\"\" Parses command line arguments and calls main \"\"\" #", "previous position is equal to original codon, it was preserved qpos -= 1", "if meta_data is not None: matches = 0 for m in meta_data: if", "Cannot run CDR/FWR partitioned model on this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt)", "sequence debug (bool): print debugging statements? recursive (bool): was this function called recursively?", "codons split by indels start_time = time() printMessage(\"Correcting frames and indels of sequences\",", "min_seq=1): \"\"\" Create intermediate sequence alignment and partition files for IgPhyML output Arguments:", "codon over in the #alignment if scodons[i][2:3] != qi[2:3] or scodons[i + 1]", "append to sequence IDs. Splits identical sequences with different meta_data. 
clones (list) :", "random import subprocess import multiprocessing as mp from argparse import ArgumentParser from collections", "None: if append is not None: for m in append: r.sequence_id = r.sequence_id", "maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask = not nmask) if total", "0 for j in range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if i", "not attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields", "sequences with different meta_data. meta_data (str): Field to append to sequence IDs. Splits", "\"\" if meta_data is not None: seq, cid = seq_f.split(delim) cid = delim", "len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log =", "found? Returns: int: number of ACGT differences. \"\"\" if len(seq1) != len(seq2): printError(\"Sequences", "to estimate for FWR,CDR respectively: e = estimate, ce = estimate + confidence", "is not None: matches = 0 for m in meta_data: if ri.getField(m) ==", "+ \"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos] != \"...\": #if", "clones, logs, meta_data, delim) if collapse and len(useqs_f) < min_seq: for seq_f, num", "+= 1 qpos = 0 if mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos,", "+ 3 + ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite + 3):]", "{\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0}", "False for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline", "clean=\"none\"): \"\"\" Run IgPhyML on outputted data Arguments: outfile (str): Output file name.", "collapse printLog(log) # Open output files out_label = 
\"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label,", "first sequence in clones, the germline sequence of the first receptor in clones,", "log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id]", "debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt =", "= i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end += 1 qpos =", "attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to", "number ATCG nucleotides for i in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki =", "findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive) if not log[\"PASS\"] and", "len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if no", "len(clones[str(k)]) < min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small:", "# if not the same, mask IMGT at that site and scan forward", "[si[i:i + 3] for i in range(0, len(si), 3)] # deal with the", "(str): Output file name. igphymlout (str): igphyml output file nproc (int): Number of", "nseqs): cid = \"\" if meta_data is not None: meta_data_list = [] for", "if scodons[i] != \"...\" and len(scodons[i]) == 3 and scodons[i] != \"NNN\": s_end", "if i == 0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\"", "log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt", "the distance between two sequences counting only A,T,C,Gs Arguments: seq1 (str): sequence 1", "have to shift the frame. 
This attempts to correct for this by looking", "% (scodons[ospos], ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\"", "for i in range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi", "omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines the", "\"N\" and seq1[i] != \"-\" and seq1[i] != \".\": if seq2[i] != \"N\"", "concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance between two sequences", "are provided, record their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if", "IMGT sequence log (dict): log of information for each sequence debug (bool): print", "printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\") for j in range(0,len(imgtar)):", "\"...\": tally += 1 tallies.append(tally) newseqs = [] # remove gap only sites", "maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked", "motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate:", "IDs to analyze. collapse (bool): if True collapse identical sequences. ncdr3 (bool): if", "INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1 else: log = OrderedDict() log[\"ID\"]", "+ [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\", "the same length! %s %s\" % (seq1, seq2)) dist = 0 for i", "= 0 if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti:", "partition files for each clone. lineages successfully processed records. 
lineages-fail database records failed", "first sequence in clones, and the number of sequences in clones. \"\"\" sites", "\"ce\"), help=\"\"\"Kappa parameters to estimate: e = estimate, ce = estimate + confidence", "= [] for i in range(0, sites, 3): tally = 0 for j", "\"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\",", "= sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True", ": mask split codons for use with igphyml? Returns: str: modified IMGT gapped", "% format) out_fields = getDbFields(db_file, reader=reader) # open input file handle = open(db_file,", "rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is not", "< s_end and scodons[spos] == \"...\": #possible next codon is just a gap", "(scodons[i], qi[0:3]), debug) if scodons[i] != \"...\": if scodons[i][0:2] == \"..\": scodons[i] =", "dict: dictionary of output pass and fail files. \"\"\" # Print parameter info", "printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try:", "# Parse command line arguments parser = getArgParser() checkArgs(parser) args = parser.parse_args() args_dict", "out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"],", "action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If", "frameshifts = 0 while spos < s_end and qpos < len(qcodons): if debug:", "igphymlout (str): igphyml output file nproc (int): Number of threads to parallelize IgPhyML", "junction region by IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
recursive (bool) :", "and seq1[i] != \".\": if seq2[i] != \"N\" and seq2[i] != \"-\" and", "mask=True): \"\"\" Masks codons split by alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor):", "printError(\"Invalid format %s.\" % format) out_fields = getDbFields(db_file, reader=reader) # open input file", "if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3:", "if an error occurs or masking fails. 1: returns 1 masking succeeds \"\"\"", "for ins in range(1, 3): ros = receptor.sequence_input ris = receptor.sequence_imgt psite =", "if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"]", "scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]: qi = \"N\" + qi spos", "out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {} clones = {} logs =", "run IgPhyML on outputted data nproc (int) : Number of threads to parallelize", "\"w\") as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0]))", "+ \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\",", "= fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] =", "True if resolveglines: printError(\"%s %s\" % (\"Predicted germlines are not the same among", "and parameters format (str): input and output format. out_args (dict): arguments for output", "printLog(log) #Run IgPhyML on outputted data? 
if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization,", "the next codon over in the #alignment if scodons[i][2:3] != qi[2:3] or scodons[i", "igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"]", "to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object for a particular sequence. clones", "= clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j]", "printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if there are differences, e.g.", "sites % 3 != 0: printError(\"number of sites must be divisible by 3!", "sequences per clone append (str): column name to append to sequence_id igphyml (bool):", "help=\"\"\"Delete intermediate files? 
none: leave all intermediate files; all: delete all intermediate files.\"\"\")", "= None if out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"], \"w\") for j", "of reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of", "min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format,", "log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log) if not nohlp: try: #estimate HLP", "r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log", "3] for i in range(0, len(qi), 3)] frameshifts = 0 s_end = 0", "{} pass_count, nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k in", "partition files for IgPhyML output Arguments: out_dir (str): directory for sequence files. useqs_f", "= receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def unAmbigDist(seq1,", "= ncon_seq + \".\" elif i == 0: ncon_seq = ncon_seq + concatenated_seq[counter]", "columns not detected.\\n! Cannot run CDR/FWR partitioned model on this data.\\n\" imgtpartlabels =", "= OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) #", "log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log)", "+ regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt", "sequence in input sequence). 
\"\"\" spos = 0 for i in range(0, len(scodons)):", "printDebug(\"Masked %s at position %d\" % (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if", "if qcodons[qpos-1] == scodons[ospos]: #if codon in previous position is equal to original", "for i in range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i] != \"-\" and", "their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not", "None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ =", "model on this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout =", "random.shuffle(big_enough) total = 0 for r in big_enough: if r.functional is None: r.functional", "sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize", "oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted data Arguments: outfile (str):", "+ str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \"", "headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to output, if specified.\"\"\")", "meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs (dict): unique sequences within a", "= [\"GERM\"] if meta_data is not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\"", "= [] for j in range(0, nseqs): for i in range(0, sites, 3):", "is not None: meta_data_list = [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid", "pass and fail files. 
\"\"\" # Print parameter info log = OrderedDict() log[\"START\"]", "output files: <folder> folder containing fasta and partition files for each clone. lineages", "of codons in IMGT sequence. qi (str) : input sequence. spos (int) :", "else: ncdr3 += 1 newseqs[i] = nseq newgerm = ngerm imgt = nimgt", "False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return 0", "to mask split codons sample_depth (int): depth of subsampling before deduplication min_seq (int):", "tallies.append(tally) newseqs = [] # remove gap only sites from observed data newgerm", "== 1: newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3: ngerm = [] nimgt", "== 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] = nseq newgerm =", "checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks whether a frameshift occured in a", "is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in range(0, len(sequences)):", "\"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f in useqs_f", "If True, run IgPhyML on outputted data nproc (int) : Number of threads", "If True, only estimate GY94 trees and parameters format (str): input and output", "log[\"PASS\"] = False #if no match for the adjacent codon was found, something\"s", "+ str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline) <", "is not None): for m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\")", "#remove parenthesis from metadata md = md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if", "[120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt:", "qcodons = [qi[i:i + 3] for i in range(0, len(qi), 3)] 
frameshifts =", "log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position %d\" % (scodons[ospos], ospos),", "argparse import ArgumentParser from collections import OrderedDict from textwrap import dedent from time", "#print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches between", "clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = [] ngermline = [] ncdr3 = 0", "spos < s_end and qpos < len(qcodons): if debug: print(scodons[spos] + \"\\t\" +", "useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines whether a PTC exits in a", "+ \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean", "of IMGT sequence in input sequence. debug (bool) : print debugging statements. Returns:", "for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)):", "outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using", "output format for IgPhyML (tab or txt) clean (str): delete intermediate files? (none,", "as a negative if clonesizes[str(k)] > 0: nclones += 1 pass_count += clonesizes[str(k)]", "if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi + si[i]", "1 return 0 # Run IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir,", "sequence processing failures. out_args (dict): arguments for output preferences. 
fail_writer (changeo.IO.TSVWriter): failed sequences", "= r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] =", "= delim + \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" %", "= Seq(\"\") for i in scodons: concatenated_seq += i # add --- gaps", "break return qi, spos def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks whether", "identical sequences with different meta_data collapse (bool): if True collapse identical sequences. ncdr3", "to join with (least ambiguous chars) joinseqs = {} # id -> useq", "(list): list of sequences in clones. clones (list): list of Receptor objects. meta_data", "#passreads += r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r]", "mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive) if not log[\"PASS\"]", "for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean != \"none\": log", "+ [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"])", "seq2)) dist = 0 for i in range(0,len(seq1)): if seq1[i] != \"N\" and", "range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar", "clone sequences. logs (dict): contains log information for each sequence. 
fails (dict): counts", "\"\"\" Masks codons split by alignment to IMGT reference, then produces input files", "maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt =", "specified, collapse identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified,", "Print parameter info log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"]", "different meta_data. meta_data (str): Field to append to sequence IDs. Splits identical sequences", "+ \",\" + str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False log[\"FAIL\"]", "log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else:", "None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) !=", "return output def getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. \"\"\"", "(list): sequences within the same clone (share indexes with clones parameter). meta_data (str):", "= nsi scodons = [si[i:i + 3] for i in range(0, len(si), 3)]", "len(seq2): printError(\"Sequences are not the same length! %s %s\" % (seq1, seq2)) dist", "\"\"\" Create intermediate sequence alignment and partition files for IgPhyML output Arguments: out_dir", "ID. newgerm (str) : modified germline of clonal lineage. conseqs (list) : consensus", "log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"] = str(spos) else:", "for each sequence debug (bool): print debugging statements? 
recursive (bool): was this function", "just cut off first letter if non-match, at which point we\"ll just want", "= seq_f cid = \"\" if meta_data is not None: seq, cid =", "igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t) branch lengths", "end of subject sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"]", "= True if debug: print(receptor.sequence_id) # adjust starting position of query sequence qi", "sequences (list): sequences within the same clone (share indexes with clones parameter). meta_data", "\"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\",", "\"\" counter = 0 for i in gaps: #print(str(i) + \":\" + ncon_seq)", "len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None,", "frame-shift by repeating this method but with an edited input sequence if not", "> 0: germ_id = [\"GERM\"] if meta_data is not None: for i in", "in range(0, nseqs): for i in range(0, sites, 3): if i == 0:", "= line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0])", "IMGT positions for first sequence in clones, the germline sequence of the first", "+ cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and duplicate:", "regions are provided, record their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt)", "(sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in range(0, nseqs): cid = \"\" if", "== 
len(meta_data)) if dist == 0 and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\")", "simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] +", "= [] for j in range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in", "= getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent],", "(\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in range(0, len(sequence),", "% (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None,", "= characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for i in range(0, sites, 3):", "sequence. qi (str) : input sequence. spos (int) : starting position of IMGT", "= log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s", "colons from metadata md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\")\",\"-\")", "output format. out_args (dict): arguments for output preferences. Returns: dict: dictionary of output", "tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in range(0, sites, 3):", "ospos (int) : position of interest in IMGT sequence. log (dict) : log", "number of clones. \"\"\" s = \"\" delim = \"_\" duplicate = True", "a clone. imgt (list) : IMGT numbering of clonal positions . 
\"\"\" #", "subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building", "from changeo.Defaults import default_format from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from", "of a clone Arguments: sequences (list): list of sequences in clones. clones (list):", "IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object for a particular sequence. clones (list):", "default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate", "on outputted data nproc (int) : Number of threads to parallelize IgPhyML across", "fact that all sequences are compared pairwise, and all are zero # distance", "= ros receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end,", "fail_writer (changeo.IO.TSVWriter): failed sequences writer object. min_seq (int): minimum number of data sequences", "by repeating this method but with an edited input sequence if not recursive:", "column name to append to sequence_id igphyml (bool): If True, run IgPhyML on", "depth of subsampling before deduplication min_seq (int): minimum number of sequences per clone", "if scodons[i] != \"...\": if scodons[i][0:2] == \"..\": scodons[i] = \"NN\" + scodons[i][2]", "ndotgaps.append(1) else: gaps.append(0) nsi = nsi + si[i] if si[i] != \".\": ndotgaps.append(0)", "collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate sequence alignment and", "starting position of query sequence qi = qi[(receptor.v_seq_start - 1):] #tally where ---", "is not None: log_handle = open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle)", ": duplicate sequence if only one in a clone. 
imgt (list) : IMGT", "1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"]", "= receptor.sequence_imgt psite = receptor.v_seq_start - 1 + oqpos*3 pisite = ospos *", "range(0, sites, 3): tally = 0 for j in range(0, nseqs): if sequences[j][i:(i", "= parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML", "pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml:", "= j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim) if", "characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for i in range(0, sites, 3): tally", "sequences (list): list of sequences in clones. clones (list): list of Receptor objects.", "ngerm imgt = nimgt #print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict() conseqs =", "\"\"\" frameshifts = 0 for ins in range(1, 3): ros = receptor.sequence_input ris", "sequences in clones. clones (list): list of Receptor objects. 
meta_data (str): Field to", "(str): output format for IgPhyML (tab or txt) nohlp (bool): If True, only", "\".\" elif i == 0: ncon_seq = ncon_seq + concatenated_seq[counter] counter += 1", "a codon that matches next site if debug: print(\"checking %s at position %d", "processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None if out_args[\"log_file\"] is not None:", "in range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i] != \"-\" and seq1[i] !=", "= estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to", "trees from running IgPhyML, if specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or", "if seq1[i] != seq2[i]: dist += 1 if fbreak: break return dist def", "cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar = meta_data[0].split(\",\") for c", "collapse: return len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args,", "logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None:", "\"\" delim = \"_\" duplicate = True # duplicate sequences in clones with", "be legitimately absent from the query, at which point #we have to shift", "for IgPhyML output Arguments: clones (list): receptor objects within the same clone. 
sequences", "= len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline", "join with (least ambiguous chars) joinseqs = {} # id -> useq to", "not None: meta_data_ar = meta_data[0].split(\",\") for c in clones: if meta_data is not", "\"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask", "(dict) : log of information for each sequence. debug (bool) : print debugging", "sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not attempt to mask split codons.\"\"\")", "if debug: print(receptor.sequence_id) # adjust starting position of query sequence qi = qi[(receptor.v_seq_start", "motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines the ArgumentParser Returns:", "True) m_match = True if meta_data is not None: matches = 0 for", "(scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position", "reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile", "for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline =", "cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks codons split by alignment to", "IMGT seq, other times it will be legitimately absent from the query, at", "+ [30] * len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) + \\ [60] *", "of joined sequences and collapse keys = list(useqs.keys()) for k in keys: if", "printWarning, printError, printDebug from changeo.Defaults import default_format from changeo.IO import splitName, getDbFields, 
getFormatOperators,", "simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"]", "(scodons[ospos], ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" +", "+= 1 if qcodons[qpos-1] == scodons[ospos]: #if codon in previous position is equal", "log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due to a frame-shift by", "= simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"] is", "files? (none, all) \"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout", "combination of topology (t) branch lengths (l) and parameters (r) in IgPhyML. omega", "scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT will just cut off first letter", "j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"]", "for i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile", "+ \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) +", "function called recursively? \"\"\" frameshifts = 0 while spos < s_end and qpos", "0 for i in gaps: #print(str(i) + \":\" + ncon_seq) if i ==", "outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt) if collapse:", "pass_handle is not None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"]", "and qcodons[qpos] != \"...\": #if IMGT gap, move forward in imgt spos +=", "Find and mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. scodons (list): list", "folder containing fasta and partition files for each clone. 
lineages successfully processed records.", "= (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in range(0,", "receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate", "k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id del useqs[k] return useqs def", "\"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id =", "optimization in IgPhyML (-t) motifs (str): motifs to use in IgPhyML (--motifs) hotness", "+ str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at position %d, but", "useqs[k] in join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if", "range(0, len(si), 3)] # deal with the fact that it's possible to start", "dict of sequence information \"\"\" debug = False qi = receptor.sequence_input si =", "dest=\"meta_data\", help=\"\"\"List of fields to containing metadata to include in output fasta file", "curgap % 3 != 0 : printDebug(\"Frame-shifting gap detected! Refusing to include sequence.\",", "action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\",", "!= len(imgtar): printError(\"IMGT assignments are not the same within clone %d!\\n\" % c.clone,False)", "identical sequences with different meta_data. clones (list) : list of receptor objects. 
collapse", "receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\"", "qcodons, spos, s_end, qpos, log, debug, recursive=False): \"\"\" Find and mask split codons", "sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"]", "columns to append to sequence ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments", "gapped sequence. log: dict of sequence information \"\"\" debug = False qi =", "< 0: #If IMGT regions are provided, record their positions rd = RegionDefinition(r.junction_length,", "+ ris[(pisite + 3):] # Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris,", "j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\")", "= \"\" delim = \"_\" duplicate = True # duplicate sequences in clones", "germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\")", "for j in range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if i ==", "\"\"\" Identify junction region by IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
recursive", "str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f", "from changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons,", "both forward spos += 1 qpos += 1 elif qcodons[qpos] == \"N\": #", "the #alignment if scodons[i][2:3] != qi[2:3] or scodons[i + 1] != qi[3:6]: qi", "(matches == len(meta_data)) if dist == 0 and m_match: ncounti = ki.count(\"A\") +", "1 newseqs[i] = nseq newgerm = ngerm imgt = nimgt #print(\"Length: \" +", "to be corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments", "found that match specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total = 0", "counter += 1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] =", "r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] =", "igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? 
none: leave all", "simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] +", "== \"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i", "= ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt", "fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return 0 if r.functional and ptcs <", "= (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR", "v_sequence_start \"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define argument", "rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is not None: simgt =", "Field to append to sequence IDs. Splits identical sequences with different meta_data target_clones", "log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False):", "position of IMGT sequence in input sequence s_end (int): end of IMGT sequence", "dist = 0 for i in range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i]", "+= 1 return 0 elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is not", "m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\")", "meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st)", "polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1,", "topology (t) branch lengths (l) and parameters (r), or nothing (n), for 
IgPhyML.\"\"\")", "(pisite + 3) < len(ris): #cut out 1 or 2 nucleotides downstream of", "differences, e.g. if reconstruction was done before clonal clustering resolveglines = False for", "!= \"N\" and seq2[i] != \"-\" and seq2[i] != \".\": if seq1[i] !=", "scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False): \"\"\" Find and mask split", "seqdiff = maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff)", "if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True r.sequence_id =", "fails. 1: returns 1 masking succeeds \"\"\" if r.clone is None: printError(\"Cannot export", "big_enough = [] all_records = [] found_no_funct = False for r in records:", "equal to original codon, it was preserved qpos -= 1 spos = ospos", "debug, recursive=False): \"\"\" Find and mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object.", "-h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\",", "error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 else:", "split codons sample_depth (int): depth of subsampling before deduplication min_seq (int): minimum number", "si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug:", "(int) : Number of threads to parallelize IgPhyML across optimization (str): Optimize combination", "format=default_format, out_args=default_out_args): \"\"\" Masks codons split by alignment to IMGT reference, then produces", "lengths (l) and parameters (r) in IgPhyML. omega (str): omega optimization in IgPhyML", "each clone. lineages successfully processed records. lineages-fail database records failed processing. 
igphyml-pass parameter", "= \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega", "in join: ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj]", "and partition files for each clone. lineages successfully processed records. lineages-fail database records", "__date__ # Imports import os import random import subprocess import multiprocessing as mp", "(t) branch lengths (l) and parameters (r) in IgPhyML. omega (str): omega optimization", "CDR3 logs (dict): contains log information for each sequence out_dir (str): directory for", "sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate,", "receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"]", "newseqs = [] # remove gap only sites from observed data newgerm =", "useqs_f = OrderedDict() conseqs = [] for j in range(0, nseqs): conseq =", "sequence IDs. Splits identical sequences with different meta_data. delim (str): delimited to use", "and ptcs < 0: #If IMGT regions are provided, record their positions rd", "name. 
igphymlout (str): igphyml output file nproc (int): Number of threads to parallelize", "(list): list of codons in IMGT sequence qcodons (list): list of codons in", "= len(newgerm) sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile =", "receptor in clones, the length of the first sequence in clones, and the", "in IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab or txt) clean", "0: random.shuffle(big_enough) total = 0 for r in big_enough: if r.functional is None:", "log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return 0 #", "[13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) +", "== sample_depth: break # Start processing clones clonesizes = {} pass_count, nclones =", "log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] +=", "GY94 tree estimation\" printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not", "total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask = not nmask)", "(bool): was this function called recursively? \"\"\" frameshifts = 0 while spos <", "fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"]", "Splits identical sequences with different meta_data target_clones (str): List of clone IDs to", "ndotgaps.append(0) #find any gaps not divisible by three curgap = 0 for i", "are compared pairwise, and all are zero # distance from the sequence they", "for IgPhyML Arguments: db_file (str): input tab-delimited database file. 
meta_data (str): Field to", "printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if no match for the adjacent codon", "igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML", "of query sequence qi = qi[(receptor.v_seq_start - 1):] #tally where --- gaps are", "sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if", "choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e = estimate, ce = estimate +", "seq1[i] != \"N\" and seq1[i] != \"-\" and seq1[i] != \".\": if seq2[i]", "= False return -len(conseqs) # Output fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir,", "psite = receptor.v_seq_start - 1 + oqpos*3 pisite = ospos * 3 if", "qi = qi[(receptor.v_seq_start - 1):] #tally where --- gaps are in IMGT sequence", "\"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches", "receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match = True if", "If True, only estimate GY94 trees and parameters clean (str): delete intermediate files?", "= ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq,", "out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment and partition files for IgPhyML output", "sequence ncon_seq = \"\" counter = 0 for i in gaps: #print(str(i) +", "scodons (list): list of codons in IMGT sequence qcodons (list): list of codons", "(str): igphyml output file nproc (int): Number of threads to parallelize IgPhyML across", "fact that IMGT sequences can end on gaps for i in range(spos, len(scodons)):", "oqpos, ospos, log, debug) elif spos >= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos]", "nproc (int) : Number of threads to 
parallelize IgPhyML across optimization (str): Optimize", "num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] =", "help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to", "\"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open output files out_label", "= kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] =", "start_time=start_time, width=50) #subsampling loop init_clone_sizes = {} big_enough = [] all_records = []", "= mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq)", "rec_count, 0.05, start_time) log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not", "log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\": igf = open(igphyml_out)", "output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05,", "printMessage(\"Correcting frames and indels of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes = {}", "log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3: if scodons[-1]", "to cluster sequences into clones first and then predict germlines using --cloned\")) if", "Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. 
\"\"\" # Define input and output", "if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq]", "ambiguous characters at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1,", "\"...\": if scodons[i][0:2] == \"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT will", "a negative if clonesizes[str(k)] > 0: nclones += 1 pass_count += clonesizes[str(k)] else:", "= OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0,", "collections import OrderedDict from textwrap import dedent from time import time from Bio.Seq", "m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments", "not the same within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" %", "Format options try: reader, writer, __ = getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\"", "logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too small, size is returned as", "[qi[i:i + 3] for i in range(0, len(qi), 3)] frameshifts = 0 s_end", "clean (str): delete intermediate files? 
(none, all) \"\"\" osplit = outfile.split(\".\") outrep =", "else: conseq_f = conseq if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount", "= keys[i] skj = keys[j] else: ski, cid = keys[i].split(delim) skj, cid =", "returns 1 masking succeeds \"\"\" if r.clone is None: printError(\"Cannot export datasets until", "containing a list of IMGT positions for first sequence in clones, the germline", "log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"]", "if True remove CDR3 logs (dict): contains log information for each sequence out_dir", "to append to sequence IDs. Splits identical sequences with different meta_data target_clones (str):", "+ \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt =", "\"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq", "append to sequence IDs. Splits identical sequences with different meta_data. delim (str): delimited", "in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] =", "regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)]", "fail_handle, fail_writer = None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"],", "nproc (int): Number of threads to parallelize IgPhyML across optimization (str): Optimize combination", "qpos += 1 while spos < s_end and scodons[spos] == \"...\": #possible next", "parsers. 
\"\"\" # Define input and output field help message fields = dedent(", "3 and scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]),", "= ros[0:(psite + 3)] + ros[(psite + 3 + ins):] receptor.sequence_imgt = ris[0:(pisite", "= \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None", "len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data", "Field to append to sequence IDs. Splits identical sequences with different meta_data. Returns:", "specified number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth", "provided, record their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"]", "qi[3:6]: qi = \"N\" + qi spos = i break else: spos =", "ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki # loop", "reference Arguments: r (changeo.Receptor.Receptor): receptor object for a particular sequence. 
clones (list): list", "small, size is returned as a negative if clonesizes[str(k)] > 0: nclones +=", "log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note: Collapse can give misleading dupcount information", "logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not collapse and len(conseqs) < min_seq: for", "None, \"fail\": None} if pass_handle is not None: output[\"pass\"] = pass_handle.name pass_handle.close() if", "logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1", "= [mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] +=", "+ regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels =", "= OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa", "= str(spos) else: printDebug(\"Masked %s at position %d, but couldn't find upstream match\"", "sequences[j][i:(i + 3)] != \"...\": tally += 1 tallies.append(tally) newseqs = [] #", "= log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos >=", "if append is not None: for m in append: r.sequence_id = r.sequence_id +", "files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize", "log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance between two sequences counting", "\".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building failed\") log = OrderedDict() log[\"OUTPUT\"] =", "= os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" %", "sequences. ncdr3 (bool): if True remove all CDR3s. 
nmask (bool): if False, do", "\"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in range(0, len(sequence), 3):", "scodons[i + 1] != qi[3:6]: qi = \"NN\" + qi spos = i", "clones, meta_data) tallies = [] for i in range(0, sites, 3): tally =", "clones = {} logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0,", "file nproc (int): Number of threads to parallelize IgPhyML across optimization (str): Optimize", "if nseqs == 1 and duplicate: if meta_data is not None: if meta_data[0]", "= False return -len(useqs_f) elif not collapse and len(conseqs) < min_seq: for j", "regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] +", "or scodons[i + 1] != qi[3:6]: qi = \"NN\" + qi spos =", "if clean != \"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean", "%d\" % (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\" in log: log[\"MASKED\"]", "in IMGT sequence. log (dict) : log of information for each sequence. debug", "this method but with an edited input sequence if not recursive: frameshifts +=", "(changeo.IO.TSVWriter): failed sequences writer object. min_seq (int): minimum number of data sequences to", "collapse to. 
if ncountj > ncounti: nci = 0 if useqs[ki] in join:", "txt) nohlp (bool): If True, only estimate GY94 trees and parameters clean (str):", "def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches between IMGT labels within a", "if scodons[spos] == \"...\" and qcodons[qpos] != \"...\": #if IMGT gap, move forward", "output files out_label = \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\")", "#fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences found that", "i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar", "ngerm = [] nimgt = [] for i in range(0, len(newseqs)): nseq =", "list of masked clone sequences. logs (dict): contains log information for each sequence.", "+ \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep,", "default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters", "alignment and partition files for IgPhyML output Arguments: out_dir (str): directory for sequence", "str: modified IMGT gapped sequence. log: dict of sequence information \"\"\" debug =", "germline if there are differences, e.g. if reconstruction was done before clonal clustering", "\"r\") records = reader(handle) fail_handle, fail_writer = None, None if out_args[\"failed\"]: fail_handle =", "codons for use with igphyml? Returns: str: modified IMGT gapped sequence. 
log: dict", "% (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\" in log: log[\"MASKED\"] =", "kappa optimization in IgPhyML (-t) motifs (str): motifs to use in IgPhyML (--motifs)", "all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination", "germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs,", "r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data is not None): for m in", "method but with an edited input sequence if not recursive: frameshifts += checkFrameShifts(receptor,", "is None: printError(\"Cannot export datasets until sequences are clustered into clones.\") if r.dupcount", "of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\",", "\"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if", "list of joined sequences and collapse keys = list(useqs.keys()) for k in keys:", "debugging statements. Returns: tuple: (modified input sequence, modified starting position of IMGT sequence", "ncdr3 (bool): if True remove all CDR3s. nmask (bool): if False, do not", "fails[\"other_fail\"] += 1 else: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone", "(int): minimum number of data sequences to include. Returns: int: number of clones.", "Receptor objects. meta_data (str): Field to append to sequence IDs. 
Splits identical sequences", "j in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j])", "not collapse and len(conseqs) < min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] =", "was this method part of a recursive call? mask (bool) : mask split", "type=int, default=1, help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"),", "range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\":", "codons in IMGT sequence. qi (str) : input sequence. spos (int) : starting", "in IgPhyML (--motifs) hotness (str): motif in IgPhyML (--hotness) oformat (str): output format", "message fields = dedent( \"\"\" output files: <folder> folder containing fasta and partition", "1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs", "directory for output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. min_seq (int): minimum", "= k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id del useqs[k] return useqs", "\"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs,", "s_end and scodons[spos] == \"...\": #possible next codon is just a gap spos", "i == 1: curgap += 1 elif i == 0 and curgap !=", ". 
\"\"\" # bootstrap these data if desired lg = len(newgerm) sites =", "m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from metadata", "and qpos < len(qcodons): if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos]", "__ = splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name is None: clone_dir =", "qi[(receptor.v_seq_start - 1):] #tally where --- gaps are in IMGT sequence and remove", "failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"]", "have to do that yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log)", "True collapse identical sequences. ncdr3 (bool): if True remove CDR3 logs (dict): contains", "\".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building in IgPhyML failed\") log = OrderedDict()", "type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser if __name__ == \"__main__\":", "of codons in IMGT sequence qcodons (list): list of codons in input sequence", "same clone. 
sequences (list): sequences within the same clone (share indexes with clones", "+ rto.sequence_id del useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines whether a PTC", "+= 1 else: fails[\"other_fail\"] += 1 else: log = OrderedDict() log[\"ID\"] = r.sequence_id", "None: matches = 0 for m in meta_data: if ri.getField(m) == rj.getField(m) and", "log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not None else None log[\"RECORDS\"] = fails[\"totalreads\"]", ">= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree estimation\"", "c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not the same within clone", "clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer,", "mp from argparse import ArgumentParser from collections import OrderedDict from textwrap import dedent", "sequences. duplicate (bool) : duplicate sequence if only one in a clone. imgt", "i in range(0, len(si), 3)] # deal with the fact that it's possible", "= \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return", "init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1 for r in all_records: if target_clones", "+ \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt", "default_out_args from presto.IO import printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults import default_format", "<folder> folder containing fasta and partition files for each clone. 
lineages successfully processed", "s_end: printDebug(\"Masked %s at position %d, at end of subject sequence\" % (scodons[ospos],", "from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find", "% c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" %", "is None: r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\")", "# Run IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\",", "PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1]", "nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\",", "parser = getArgParser() checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] #", "clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader, writer, __ =", "ris frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\" % ins, debug) log[\"FAIL\"] =", "out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name, __ =", "Receptor object. 
scodons (list): list of codons in IMGT sequence qcodons (list): list", "log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"]", "and changeo imports from presto.Defaults import default_out_args from presto.IO import printLog, printMessage, printWarning,", "commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id", "= False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3,", "kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj", "to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate", "+= 1 newseqs[i] = nseq newgerm = ngerm imgt = nimgt #print(\"Length: \"", "= ris[0:(pisite + 3)] + ris[(pisite + 3):] # Debug sequence modifications printDebug(ros,", "if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid = clones[num].sequence_id.translate(transtable) +", ", id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def", "(str): delete intermediate files? 
(none, all) nohlp (bool): If True, only estimate GY94", "in imgt spos += 1 elif scodons[spos] == qcodons[qpos]: # if both are", "for j in range(i+1,len(keys)): ki = keys[i] kj = keys[j] if meta_data is", "qcodons[qpos-1] == scodons[ospos]: #if codon in previous position is equal to original codon,", "from time import time from Bio.Seq import Seq from functools import partial #", "not found.\") found_no_funct = True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1", "\"%s.part.txt\" % key) if clonesizes[key] > 0: germ_id = [\"GERM\"] if meta_data is", "log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos", "= fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count", "collapse and len(conseqs) < min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone", "seq, other times it will be legitimately absent from the query, at which", "that site and scan forward until you find a codon that matches next", "== \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat txt will delete all", "with clones parameter). meta_data (str): Field to append to sequence IDs. Splits identical", "(str): directory for sequence files. useqs_f (dict): unique sequences mapped to ids. 
meta_data", "handle.close() output = {\"pass\": None, \"fail\": None} if pass_handle is not None: output[\"pass\"]", "writer(fail_handle, fields=out_fields) cloneseqs = {} clones = {} logs = OrderedDict() fails =", "= subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree", "if clonesizes[key] > 0: germ_id = [\"GERM\"] if meta_data is not None: for", "changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and", "(changeo.Receptor.Receptor): Receptor object. scodons (list): list of codons in IMGT sequence qcodons (list):", "scan forward until you find a codon that matches next site if debug:", "Output fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs,", "= list(useqs.keys()) for k in keys: if useqs[k] in join: rfrom = receptors[useqs[k]]", "letter if non-match, at which point we\"ll just want to mask the #first", "kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj # this algorithm depends on the", "+ 3)] + ris[(pisite + 3):] # Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input,", "Parses command line arguments and calls main \"\"\" # Parse command line arguments", "- len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] +", "clone is too small, size is returned as a negative if clonesizes[str(k)] >", "nimgt = [] for i in range(0, len(newseqs)): nseq = [] ncdr3 =", "else: return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\"", "nseq newgerm = ngerm imgt = nimgt #print(\"Length: \" + str(ncdr3)) useqs_f =", "not None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is not None: log_handle.close() 
#printProgress(rec_count,", "list of Receptor objects. meta_data (str): Field to append to sequence IDs. Splits", "len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if", "key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key)", "sequence in frame 1. Returns: int: negative if not PTCs, position of PTC", "in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False", ">= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough)", "information for each sequence debug (bool): print debugging statements? recursive (bool): was this", "return frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False): \"\"\"", "dupcount information if some sequences have ambiguous characters at polymorphic sites def buildTrees(db_file,", "as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\"", "= keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True)", "receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs (dict): unique sequences", "r.sequence_id = r.sequence_id + \"_\" + r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs,", "scodons[spos]: printDebug(\"Masked %s at position %d\" % (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\"", "i break elif scodons[i][0] == \".\": scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3]", "1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] 
+= 1 elif", "counter = 0 for i in gaps: #print(str(i) + \":\" + ncon_seq) if", "range(0, sites, 3): if i == 0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)])", "IDs. Splits identical sequences with different meta_data. meta_data (str): Field to append to", "= 1 for r in all_records: if target_clones is None or r.clone in", "end of IMGT sequence qpos (int): starting position of input sequence in IMGT", "* len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt)", "+ cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in range(0, nseqs):", "output Arguments: out_dir (str): directory for sequence files. useqs_f (dict): unique sequences mapped", "checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask split codons", "logs (dict): contains log information for each sequence out_dir (str): directory for output", "printError(\"IMGT assignments are not the same within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"),", "!= 0 : printDebug(\"Frame-shifting gap detected! Refusing to include sequence.\", debug) log[\"PASS\"] =", "str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" +", "(--omega) kappa (str): kappa optimization in IgPhyML (-t) motifs (str): motifs to use", "if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not the same within clone %d!\\n\"", "= hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\"", "GY94 trees and parameters clean (str): delete intermediate files? 
(none, all) \"\"\" osplit", "* 3 if (psite + 3 + ins) < len(ros) and (pisite +", "- fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on outputted data? if igphyml:", "1 if fbreak: break return dist def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\"", "1 fails[\"region_fail\"] += 1 return 0 else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence", "correctseqs = False for seqi in range(0, len(sequences)): i = sequences[seqi] if len(i)", "debug: print(receptor.sequence_id) # adjust starting position of query sequence qi = qi[(receptor.v_seq_start -", "(bool): if True remove CDR3 logs (dict): contains log information for each sequence", "fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"] is not \"\"", "dedent( \"\"\" output files: <folder> folder containing fasta and partition files for each", "igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run", "+= 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count", "log[\"NPROC\"] = nproc printLog(log) if not nohlp: try: #estimate HLP parameters/trees p =", "useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim)", "resolveglines: printError(\"%s %s\" % (\"Predicted germlines are not the same among sequences in", "ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs", "help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do", "sequences counting only A,T,C,Gs Arguments: seq1 (str): sequence 1 
seq2 (str): sequence 2", "sites - len(germline) germline = germline + \"N\" * (seqdiff) if sites %", "conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] =", "(list): list of codons in IMGT sequence. qi (str) : input sequence. spos", "germline = clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in range(0, len(sequences)): i =", "changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline", "to sequence IDs. Splits identical sequences with different meta_data. clones (list) : list", "True remove CDR3 logs (dict): contains log information for each sequence out_dir (str):", "suddenly has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\"", "and lineage trees from running IgPhyML, if specified required fields: sequence_id, sequence, sequence_alignment,", "[] nimgt = [] for i in range(0, len(newseqs)): nseq = [] ncdr3", "detected! Refusing to include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\"", "range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\"))", "None or r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records)", "= r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from", "elif spos >= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if spos", "at which point #we have to shift the frame. This attempts to correct", "if only one in a clone. 
imgt (list) : IMGT numbering of clonal", "clonal clustering resolveglines = False for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if", "parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data", "[108] * len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) !=", "IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. recursive (bool) : was this method", "scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos +=", "%d\" % ins, debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input", "+ 3 + ins) < len(ros) and (pisite + 3) < len(ris): #cut", "logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs)", "(int): starting position of IMGT sequence in input sequence s_end (int): end of", "break after first difference found? Returns: int: number of ACGT differences. \"\"\" if", "if len(seq1) != len(seq2): printError(\"Sequences are not the same length! %s %s\" %", "igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral", "newgerm = ngerm imgt = nimgt #print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict()", "ptcs < 0: #If IMGT regions are provided, record their positions rd =", "ID if(meta_data is not None): for m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md", "sequences with different meta_data collapse (bool): if True collapse identical sequences. ncdr3 (bool):", "the adjacent codon was found, something\"s up. 
log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if", "log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id del useqs[k] return useqs def hasPTC(sequence):", "sequence and remove them for now gaps = [] ndotgaps = [] nsi", "clonef: if collapse: for seq_f, num in useqs_f.items(): seq = seq_f cid =", "if i == 1: curgap += 1 elif i == 0 and curgap", "= OrderedDict() conseqs = [] for j in range(0, nseqs): conseq = \"\".join([str(seq_rec)", "out 1 or 2 nucleotides downstream of offending codon receptor.sequence_input = ros[0:(psite +", "sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data is not", "for extracting metadata from ID. newgerm (str) : modified germline of clonal lineage.", "e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building failed\") log = OrderedDict()", "= receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True", "between two sequences counting only A,T,C,Gs Arguments: seq1 (str): sequence 1 seq2 (str):", "fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"]", "if len(lcodon) == 2: newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon) == 1:", "\"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None, \"fail\": None} if pass_handle is", "(str): kappa optimization in IgPhyML (-t) motifs (str): motifs to use in IgPhyML", "and m != \"DUPCOUNT\": matches += 1 m_match = (matches == len(meta_data)) if", "maxlen = sites maximgt = len(imgtar) for j in range(0,len(sequences)): if len(sequences[j]) >", "a clone (index is value in useqs dict). log (collections.OrderedDict): log of sequence", "log (collections.OrderedDict): log of sequence errors. 
meta_data (str): Field to append to sequence", "nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML", "motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr))", "log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep) for line in todelete: line =", "range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False", "topology (t) branch lengths (l) and parameters (r) in IgPhyML. omega (str): omega", "= log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"]", "metadata md = md.replace(\":\",\"-\") #remove colons from metadata md = md.replace(\",\",\"-\") #remove commas", "Receptor object. oqpos (int) : position of interest in input sequence. ospos (int)", "nseqs (int): number of sequences. delim (str) : delimiter for extracting metadata from", "ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment and partition files", "= dedent( \"\"\" output files: <folder> folder containing fasta and partition files for", "append to sequence IDs. Splits identical sequences with different meta_data. meta_data (str): Field", "out_dir (str): directory for output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. 
min_seq", "= fail_handle.name fail_handle.close() if log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time)", "None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones,", "printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if there are differences, e.g. if reconstruction", "in scodons: concatenated_seq += i # add --- gaps back to IMGT sequence", "mask IMGT at that site and scan forward until you find a codon", "sequence (str): IMGT gapped sequence in frame 1. Returns: int: negative if not", "if reconstruction was done before clonal clustering resolveglines = False for c in", "\"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\")", "meta_data is None: ski = keys[i] skj = keys[j] else: ski, cid =", "is returned as a negative if clonesizes[str(k)] > 0: nclones += 1 pass_count", "preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos)", "= r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"]", "r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return", "+= clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] -", "dist def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs", "imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt)", "returned as a negative if clonesizes[str(k)] > 0: nclones += 1 pass_count +=", "= 
fails[\"rec_count\"] - pass_count # End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle", "nmask) if total == sample_depth: break # Start processing clones clonesizes = {}", "cid = delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"),", "in clones, and the number of sequences in clones. \"\"\" sites = len(sequences[0])", "start_time=start_time, end=True, width=50) log_handle = None if out_args[\"log_file\"] is not None: log_handle =", "findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False): \"\"\" Find and mask", "is not None: output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle is not None: output[\"fail\"]", "out_fields = getDbFields(db_file, reader=reader) # open input file handle = open(db_file, \"r\") records", "OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt", "log = OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] =", "with different meta_data. Returns: tuple: tuple of length four containing a list of", "if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos]", "in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records)", "c.getField(\"germline_imgt\") if ngermline != germline: resolveglines = True if resolveglines: printError(\"%s %s\" %", "a recursive call? 
mask (bool) : mask split codons for use with igphyml?", "% (imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs", "simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"] is not", "= outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args", "oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument", "i in range(0, sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if", "ncountj > ncounti: nci = 0 if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]]", "except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building failed\")", "mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1 else:", "outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too", "rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all sequences and germline of a clone", "= [] ngermline = [] ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen:", "hotness (str): motif in IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab", "i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean != \"none\": log =", "os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader, writer, __", "PTCs, position of PTC if found. 
\"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\",", "out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return 0 # Run IgPhyML", "r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask = not", "to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data", "# print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\" + str((germline))) for j in", "newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in range(0,", "(bool): If True, only estimate GY94 trees and parameters clean (str): delete intermediate", "find upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]:", "are in IMGT sequence and remove them for now gaps = [] ndotgaps", "range(0, len(newseqs)): nseq = [] ncdr3 = 0 for j in range(0, len(imgt)):", "fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones, logs,", "or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if spos >= s_end: printDebug(\"Masked %s", "useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of", "qpos = 0 if mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug,", "= \"\" for i in range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else:", "len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"]", "List of clone IDs to analyze. collapse (bool): if True collapse identical sequences.", "same clone (share indexes with clones parameter). 
meta_data (str): Field to append to", "input file handle = open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer = None,", "mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\":", "1 m_match = (matches == len(meta_data)) if dist == 0 and m_match: ncounti", "= rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is not None: simgt", "not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\"))", "printDebug from changeo.Defaults import default_format from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName", "if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data sequences. Any", "# loop through list of joined sequences and collapse keys = list(useqs.keys()) for", "logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks codons split by alignment to IMGT", "ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite + 3):] # Debug sequence", "in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount", "logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt", "not \"\" and regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] +", "True if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True r.sequence_id", "of interest in IMGT sequence. 
log (dict) : log of information for each", "scodons[i] != \"...\" and len(scodons[i]) == 3 and scodons[i] != \"NNN\": s_end =", "sequence if only one in a clone. imgt (list) : IMGT numbering of", "metadata md = md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if append is not", "None: meta_data_ar = meta_data[0].split(\",\") for c in clones: if meta_data is not None:", "qi = receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] =", "+ regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"])", "command line arguments and calls main \"\"\" # Parse command line arguments parser", "lineages successfully processed records. lineages-fail database records failed processing. igphyml-pass parameter estimates and", "= receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap", "parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files?", "outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args =", "getDbFields(db_file, reader=reader) # open input file handle = open(db_file, \"r\") records = reader(handle)", "= hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"]", "ncon_seq) if i == 1: ncon_seq = ncon_seq + \".\" elif i ==", "collapse identical sequences. ncdr3 (bool): if True remove all CDR3s. 
nmask (bool): if", "printError(\"IMGT assignments are not the same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\"", "in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\" %", "1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"]", "in range(0, nseqs): if sequences[j][i:(i + 3)] != \"...\": tally += 1 tallies.append(tally)", "\"_\")) cid = delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" %", "Masks codons split by alignment to IMGT reference, then produces input files for", "+= 1 return 0 # Run IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out,", "printWarning(\"Using --clean all with --oformat txt will delete all tree file results.\\n\" \"You'll", "to correct for this by looking at the next codon over in the", "range(0, nseqs): cid = \"\" if meta_data is not None: meta_data_list = []", "ncdr3: ngerm = [] nimgt = [] for i in range(0, len(newseqs)): nseq", "= [] nimgt = [] for i in range(0, len(newseqs)): nseq = []", "if i == 1: ncon_seq = ncon_seq + \".\" elif i == 0:", "bunch of Ns qpos += 1 spos += 1 else: # if not", "= [] ncdr3 = 0 for j in range(0, len(imgt)): if imgt[j] !=", "motifs (str): motifs to use in IgPhyML (--motifs) hotness (str): motif in IgPhyML", "\"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else:", "if ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline != germline: resolveglines =", "correctseqs = True if correctseqs: maxlen = sites maximgt = len(imgtar) for j", "corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] 
!= imgtar[j]: printError(\"IMGT assignments are not", "or txt) clean (str): delete intermediate files? (none, all) nohlp (bool): If True,", "len(r.sequence_imgt) or simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] =", "of sequences. delim (str) : delimiter for extracting metadata from ID. newgerm (str)", "= clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for", "in range(0, len(si), 3)] # deal with the fact that it's possible to", "= cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar = meta_data[0].split(\",\") for", "1 qpos = 0 if mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log,", "out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\":", "(bool): If True, run IgPhyML on outputted data nproc (int) : Number of", "to append to sequence IDs. Splits identical sequences with different meta_data collapse (bool):", "os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat ==", "regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\ [108]", "= keys[i] kj = keys[j] if meta_data is None: ski = keys[i] skj", "= OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\": igf = open(igphyml_out) names", "= frameshifts if len(scodons[-1]) != 3: if scodons[-1] == \"..\" or scodons[-1] ==", "all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not attempt to mask split", "conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data is not None: for i in", "receptor (changeo.Receptor.Receptor): Receptor object. 
recursive (bool) : was this method part of a", "duplicate sequences in clones with only 1 sequence? imgtar, germline, sites, nseqs =", "debug, recursive) if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1])", "for line in todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\")", "lcodon = \"\" for i in range(0, sites, 3): if tallies[i//3] > 0:", "igphyml_out if oformat == \"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals =", "skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski,", "and spos < s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\"", "out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j],", "imgtar[j]: printError(\"IMGT assignments are not the same within clone %d!\\n\" % c.clone, False)", "!= \"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete", "transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile,", "ski, cid = keys[i].split(delim) skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj =", "dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\",", "input sequence. debug (bool) : print debugging statements. 
Returns: tuple: (modified input sequence,", "= open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer = None, None if out_args[\"failed\"]:", "cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and duplicate: if", "distance between two sequences counting only A,T,C,Gs Arguments: seq1 (str): sequence 1 seq2", "\"\"\" debug = False qi = receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict()", "#Resolve germline if there are differences, e.g. if reconstruction was done before clonal", "from the sequence they will be collapse to. if ncountj > ncounti: nci", "reconstruction was done before clonal clustering resolveglines = False for c in clones:", "will be legitimately absent from the query, at which point #we have to", "+ [80] * len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\"))", "< len(qcodons): if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\"", "#remove parenthesis from sequence ID if(meta_data is not None): for m in range(0,len(meta_data)):", "list(useqs.keys()) join = {} # id -> sequence id to join with (least", "starting position of IMGT sequence in input sequence). 
\"\"\" spos = 0 for", "in big_enough: if r.functional is None: r.functional = True if found_no_funct is False:", "skj = keys[j] else: ski, cid = keys[i].split(delim) skj, cid = keys[j].split(delim) ri", "receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug: print(receptor.sequence_id) #", "out_dir=clone_dir, min_seq=min_seq) #If clone is too small, size is returned as a negative", "with an edited input sequence if not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos,", "is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove", "was found, something\"s up. log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due", "\"\"\" Masks codons split by alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor", "range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i] != \"-\" and seq1[i] != \".\":", "if meta_data is not None: if meta_data[0] == \"DUPCOUNT\": cid = delim +", "partitioned model on this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout", "[\"GERM\"] if meta_data is not None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\"", "IgPhyML on outputted data nproc (int) : Number of threads to parallelize IgPhyML", "to ids. meta_data (str): Field to append to sequence IDs. Splits identical sequences", "model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser", "Find and mask split codons Arguments: scodons (list): list of codons in IMGT", "for IgPhyML (tab or txt) clean (str): delete intermediate files? 
(none, all) nohlp", "to estimate: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\",", "in clones. \"\"\" sites = len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline", "(changeo.Receptor.Receptor): Receptor object. recursive (bool) : was this method part of a recursive", "some sequences have ambiguous characters at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False,", "mask=True): \"\"\" Identify junction region by IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object.", "partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\"", "\"\"\" Collapses identical sequences Argument: useqs (dict): unique sequences within a clone. maps", "joinseqs[ki] = kj else: ncj = 0 if useqs[kj] in join: ncj =", "parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\")", "a PTC exits in a sequence Arguments: sequence (str): IMGT gapped sequence in", "1):] #tally where --- gaps are in IMGT sequence and remove them for", "try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args))", "str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at position %d, but couldn't", "igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser if", "in join: nci = ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki]", "is None: clone_dir = clone_name else: clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir):", "= ncon_seq 
+ concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq", "file name. igphymlout (str): igphyml output file nproc (int): Number of threads to", "qpos < len(qcodons) and spos < s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \"", "if r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05,", "!= seq2[i]: dist += 1 if fbreak: break return dist def deduplicate(useqs, receptors,", "clone. maps sequence to index in Receptor list. receptors (dict): receptors within a", "\"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser", "kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj # this", "r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data is not None): for", "number of sequences. delim (str) : delimiter for extracting metadata from ID. 
newgerm", "sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to output, if", "1: newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3: ngerm = [] nimgt =", "gaps = [] ndotgaps = [] nsi = \"\" for i in range(0,len(si)):", "not None): for m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove", "os import random import subprocess import multiprocessing as mp from argparse import ArgumentParser", "clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs,", "position %d, at end of subject sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\"", "IMGT sequences can end on gaps for i in range(spos, len(scodons)): if scodons[i]", "\".\": scodons[-1] = \"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"]", "printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if scodons[i] != \"...\": if scodons[i][0:2] ==", "occurs or masking fails. 1: returns 1 masking succeeds \"\"\" if r.clone is", "input sequence spos (int): starting position of IMGT sequence in input sequence s_end", "nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to append to sequence ID to ensure", "of input sequence in IMGT sequence log (dict): log of information for each", "is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if", "different meta_data. clones (list) : list of receptor objects. collapse (bool) : deduplicate", "parameter info log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] =", "not PTCs, position of PTC if found. 
\"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\",", "nclones += 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)]", "elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position %d\" % (scodons[ospos], ospos), debug)", "clones parameter). meta_data (str): Field to append to sequence IDs. Splits identical sequences", "meta_data) tallies = [] for i in range(0, sites, 3): tally = 0", "database file. meta_data (str): Field to append to sequence IDs. Splits identical sequences", "logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f)", "clones (list) : list of receptor objects. collapse (bool) : deduplicate sequences. nseqs", "i in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if scodons[i] !=", ": printDebug(\"Frame-shifting gap detected! Refusing to include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"]", "in IgPhyML failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] =", "meta_data): \"\"\" Characterize potential mismatches between IMGT labels within a clone Arguments: sequences", "Any clones with fewer than the specified number of sequences will be excluded.\"\"\")", "if meta_data is None: ski = keys[i] skj = keys[j] else: ski, cid", "that yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note: Collapse", "mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT", "# possible that SEQ-IMGT ends on a bunch of Ns qpos += 1", "parser if __name__ == \"__main__\": \"\"\" Parses command line arguments and calls main", "+ [108] * len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\"))", "igphyml: igphyml_out = 
getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if", "(str) : delimiter for extracting metadata from ID. newgerm (str) : modified germline", "parameter estimates and lineage trees from running IgPhyML, if specified required fields: sequence_id,", "to include in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List", "action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e = estimate,", "if (psite + 3 + ins) < len(ros) and (pisite + 3) <", "qpos, log, debug, recursive=False): \"\"\" Find and mask split codons Arguments: receptor (changeo.Receptor.Receptor):", "sequence IDs. Splits identical sequences with different meta_data. meta_data (str): Field to append", "int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log =", "characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches between IMGT labels within a clone", "clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False for seqi", "Open output files out_label = \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"],", "output preferences. Returns: dict: dictionary of output pass and fail files. 
\"\"\" #", "s_end and qpos < len(qcodons): if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if", "i = sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs =", "> 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in range(0, sites, 3): if", "< min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \"", "key) if clonesizes[key] > 0: germ_id = [\"GERM\"] if meta_data is not None:", "kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted data", "in newseqs[j]]) if meta_data is not None: meta_data_list = [] for m in", "for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff", "+ \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id", "r.functional is None: r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL column not", "for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be", "igphyml output file nproc (int): Number of threads to parallelize IgPhyML across optimization", "regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"])", "and seq2[i] != \".\": if seq1[i] != seq2[i]: dist += 1 if fbreak:", "log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt", "pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile", "= line.split(\"\\t\") if len(lsplit) == 4: 
os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout)", "\"..\" or scodons[-1] == \".\": scodons[-1] = \"...\" else: scodons[-1] = \"NNN\" if", "log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] +=", "Masks codons split by alignment to IMGT reference Arguments: r (changeo.Receptor.Receptor): receptor object", "sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None:", "ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3))", "== \".\": scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1]", "and duplicate: if meta_data is not None: if meta_data[0] == \"DUPCOUNT\": cid =", "if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" +", "within a clone Arguments: sequences (list): list of sequences in clones. clones (list):", "log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"]", "\"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\",", "if meta_data is not None: meta_data_list = [] for m in range(0,len(meta_data)): if", ">= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if spos >= s_end:", "failures. out_args (dict): arguments for output preferences. 
fail_writer (changeo.IO.TSVWriter): failed sequences writer object.", "sequence_id igphyml (bool): If True, run IgPhyML on outputted data nproc (int) :", "igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number", "md = md.replace(\")\",\"-\") #remove parenthesis from metadata md = md.replace(\"(\",\"-\") #remove parenthesis from", "for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(useqs_f))", "(sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data is not None: for i", "range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data is not", "from argparse import ArgumentParser from collections import OrderedDict from textwrap import dedent from", "by 3! len: %d, clone: %s , id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0]))", "range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = []", "ospos * 3 if (psite + 3 + ins) < len(ros) and (pisite", "and mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
scodons (list): list of", "concatenated_seq += i # add --- gaps back to IMGT sequence ncon_seq =", "imgt) if collapse: return len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs,", "\"--oformat\", oformat, \"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict()", "+ str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\" %", "and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \"", "directory for sequence files. useqs_f (dict): unique sequences mapped to ids. meta_data (str):", "the same clone (share indexes with clones parameter). meta_data (str): Field to append", "pass_handle.name pass_handle.close() if fail_handle is not None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle", "\"\"\" keys = list(useqs.keys()) join = {} # id -> sequence id to", "regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) <", "by three curgap = 0 for i in ndotgaps: if i == 1:", "length four containing a list of IMGT positions for first sequence in clones,", "== scodons[ospos]: #if codon in previous position is equal to original codon, it", "is None or r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] =", "identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3", "min_seq (int): minimum number of sequences per clone append (str): column name to", "+= 1 printDebug(\"FRAMESHIFT of length %d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING", "qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos] != \"...\": #if IMGT gap, move", "or 
scodons[i+1] != qi[3:6]: qi = \"N\" + qi spos = i break", "concatenated_seq = Seq(\"\") for i in scodons: concatenated_seq += i # add ---", "\"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"]", "nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted data Arguments: outfile (str): Output", "useqs_f.items(): seq = seq_f cid = \"\" if meta_data is not None: seq,", "Arguments: receptor (changeo.Receptor.Receptor): Receptor object. recursive (bool) : was this method part of", "before clonal clustering resolveglines = False for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\")", "os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat", "data? if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat,", "found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id =", "\"\\n! IMGT FWR/CDR sequence columns not detected.\\n! Cannot run CDR/FWR partitioned model on", "\"\" and regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"]", "igphyml-pass parameter estimates and lineage trees from running IgPhyML, if specified required fields:", "a list of IMGT positions for first sequence in clones, the germline sequence", "oqpos*3 pisite = ospos * 3 if (psite + 3 + ins) <", "not the same, mask IMGT at that site and scan forward until you", "this method part of a recursive call? 
mask (bool) : mask split codons", "spos, s_end, qpos, log, debug, recursive=False): \"\"\" Find and mask split codons Arguments:", "[] ndotgaps = [] nsi = \"\" for i in range(0,len(si)): if si[i]", "delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\")))", "meta_data is not None: meta_data_list = [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\"))", "else: printWarning(\"Using --clean all with --oformat txt will delete all tree file results.\\n\"", "= False #if no match for the adjacent codon was found, something\"s up.", "for the adjacent codon was found, something\"s up. log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out", "= correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i + 3] for i in range(0,", "CDR3 from all sequences and germline of a clone Arguments: sequences (list): list", "in todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit)", "default=1, help=\"\"\"Minimum number of data sequences. Any clones with fewer than the specified", "str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\"", "= concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance", "e.output, '<') printError(\"GY94 tree building in IgPhyML failed\") log = OrderedDict() log[\"START\"] =", "= maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last", "with different meta_data target_clones (str): List of clone IDs to analyze. 
collapse (bool):", "j in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) >", "= ospos * 3 if (psite + 3 + ins) < len(ros) and", "+ si[i] if si[i] != \".\": ndotgaps.append(0) #find any gaps not divisible by", "in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos)", "%d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input =", "True if meta_data is not None: matches = 0 for m in meta_data:", "in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output =", "if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name)", "sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] +", "s_end (int): end of IMGT sequence qpos (int): starting position of input sequence", "of clone IDs to analyze. collapse (bool): if True collapse identical sequences. ncdr3", "import Seq from functools import partial # Presto and changeo imports from presto.Defaults", "fbreak (bool): break after first difference found? Returns: int: number of ACGT differences.", "useqs dict). log (collections.OrderedDict): log of sequence errors. meta_data (str): Field to append", "in clones with only 1 sequence? imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones,", "failed sequences writer object. min_seq (int): minimum number of data sequences to include.", "sequence if not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos", "with only 1 sequence? 
imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies", "do not attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of", "cid = delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"),", "correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i + 3] for i in range(0, len(qi),", "if spos >= s_end: printDebug(\"Masked %s at position %d, at end of subject", "cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data is not", "\"N\" * (seqdiff) if sites % 3 != 0: printError(\"number of sites must", "= receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap = 0 si = nsi scodons", "\",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons) and spos", "sequences with different meta_data target_clones (str): List of clone IDs to analyze. collapse", "[0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs", "gap spos += 1 while qpos < len(qcodons) and spos < s_end and", "\"\" and regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"]", "int: negative if not PTCs, position of PTC if found. \"\"\" ptcs =", "igphyml? Returns: str: modified IMGT gapped sequence. 
log: dict of sequence information \"\"\"", "printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO", "os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat txt will delete all tree file", "keys[j] else: ski, cid = keys[i].split(delim) skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]]", "\"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt,", "and scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug)", "!= qi[1:3] or scodons[i+1] != qi[3:6]: qi = \"N\" + qi spos =", "sequence ID if(meta_data is not None): for m in range(0,len(meta_data)): md = r.getField(meta_data[m])", "% 3 != 0 : printDebug(\"Frame-shifting gap detected! 
Refusing to include sequence.\", debug)", "< s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if no match for the", "seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\"", "if debug: print(\"checking %s at position %d %d\" % (scodons[spos], spos, qpos)) ospos=spos", "= clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False for", "ski = keys[i] skj = keys[j] else: ski, cid = keys[i].split(delim) skj, cid", "+ str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for i in scodons:", "for FWR,CDR respectively: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\",", "reader, writer, __ = getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\" % format) out_fields", "delim, newgerm, conseqs, duplicate, imgt) if collapse: return len(useqs_f) else: return nseqs def", "print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building in IgPhyML failed\") log =", "are not the same among sequences in the same clone.\", \"Be sure to", "zero # distance from the sequence they will be collapse to. if ncountj", "!= 0: printError(\"number of sites must be divisible by 3! 
len: %d, clone:", "(--motifs) hotness (str): motif in IgPhyML (--hotness) oformat (str): output format for IgPhyML", "(imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to", "sample_depth (int): depth of subsampling before deduplication min_seq (int): minimum number of sequences", "import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline import", "debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] =", "r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False", "else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data,", "[\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\",", "= i break else: spos = i break return qi, spos def checkFrameShifts(receptor,", "for sequence files. useqs_f (dict): unique sequences mapped to ids. meta_data (str): Field", "HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] =", "= pass_handle.name pass_handle.close() if fail_handle is not None: output[\"fail\"] = fail_handle.name fail_handle.close() if", "\"\"\" sites = len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\")", "are not the same length! 
%s %s\" % (seq1, seq2)) dist = 0", "within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0,", "be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to append", "in IMGT sequence. qi (str) : input sequence. spos (int) : starting position", "in ptcs: return i return -1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from", "% (s_end, len(scodons), scodons[s_end]), debug) s_end += 1 qpos = 0 if mask:", "help=\"\"\"If specified, do not attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\",", "unique sequences within a clone. maps sequence to index in Receptor list. receptors", "sequence IDs. Splits identical sequences with different meta_data target_clones (str): List of clone", "from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from", "not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\", "len(seq1) != len(seq2): printError(\"Sequences are not the same length! %s %s\" % (seq1,", "action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence", "Returns: 0: returns 0 if an error occurs or masking fails. 1: returns", ": print debugging statements. 
Returns: tuple: (modified input sequence, modified starting position of", "are not the same within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\"", "log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\"", "Returns: tuple: (modified input sequence, modified starting position of IMGT sequence in input", "for now gaps = [] ndotgaps = [] nsi = \"\" for i", "ncon_seq + concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq =", "0 for m in meta_data: if ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\":", "sequence in clones, and the number of sequences in clones. \"\"\" sites =", "Receptor list. receptors (dict): receptors within a clone (index is value in useqs", "\".\": if seq2[i] != \"N\" and seq2[i] != \"-\" and seq2[i] != \".\":", "fails (dict): counts of various sequence processing failures. out_args (dict): arguments for output", "\"DUPCOUNT\": matches += 1 m_match = (matches == len(meta_data)) if dist == 0", "records = reader(handle) fail_handle, fail_writer = None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file,", "IMGT numbering of clonal positions . 
\"\"\" # bootstrap these data if desired", "#remove commas from metadata md = md.replace(\")\",\"-\") #remove parenthesis from metadata md =", "if some sequences have ambiguous characters at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None,", "parameters to estimate for FWR,CDR respectively: e = estimate, ce = estimate +", "return i return -1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all sequences", "formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse", "partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1):", "1 tallies.append(tally) newseqs = [] # remove gap only sites from observed data", "append: r.sequence_id = r.sequence_id + \"_\" + r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs,", "True if debug: print(receptor.sequence_id) # adjust starting position of query sequence qi =", "partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create", "sequences in clones. clones (list): list of Receptor objects. 
\"\"\" for i in", "\"DUPCOUNT\": cid = delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" %", "!= germline: resolveglines = True if resolveglines: printError(\"%s %s\" % (\"Predicted germlines are", "clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as", "None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count,", "ArgumentParser from collections import OrderedDict from textwrap import dedent from time import time", "optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on", "help=\"\"\"List of columns to append to sequence ID to ensure uniqueness.\"\"\") igphyml_group =", ": IMGT numbering of clonal positions . \"\"\" # bootstrap these data if", "position of PTC if found. \"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\",", "IgPhyML across optimization (str): Optimize combination of topology (t) branch lengths (l) and", "Splits identical sequences with different meta_data. 
clones (list) : list of receptor objects.", "seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f in useqs_f and collapse:", "= log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] =", "3)] + ros[(psite + 3 + ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] +", "\"_\" + r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask", "else: ski, cid = keys[i].split(delim) skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj", "do that yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note:", "is None: __, clone_name, __ = splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name", "# Call main for each input file for f in args.__dict__[\"db_files\"]: args_dict[\"db_file\"] =", "in IgPhyML. omega (str): omega optimization in IgPhyML (--omega) kappa (str): kappa optimization", "mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] =", "remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not attempt", "from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id =", "choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? none: leave all intermediate files;", "% (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at", "in input sequence). 
\"\"\" spos = 0 for i in range(0, len(scodons)): printDebug(\"%s", "or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if correctseqs: maxlen = sites maximgt", "None: seq, cid = seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\") sid =", "argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing", "!= qi[2:3] or scodons[i + 1] != qi[3:6]: qi = \"NN\" + qi", "= 0 for i in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug)", "return 0 elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is not None: simgt", "print debugging statements. Returns: tuple: (modified input sequence, modified starting position of IMGT", "clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count # End clone processing", "= r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from", "log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def", "+ regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\", "sequence spos (int): starting position of IMGT sequence in input sequence s_end (int):", "fields to containing metadata to include in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\",", "delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate sequence alignment and partition files", "for IgPhyML output Arguments: out_dir (str): directory for sequence files. 
useqs_f (dict): unique", "ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki # loop through list", "Imports import os import random import subprocess import multiprocessing as mp from argparse", "in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir,", "nproc printLog(log) if not nohlp: try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except", "attempts to correct for this by looking at the next codon over in", "rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"]", "key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir,", "interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e", "log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\"", "motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log) if not nohlp: try: #estimate", "-= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count # End clone processing printMessage(\"Done\", start_time=start_time,", "fails[\"nf_fail\"] += 1 return 0 # Run IgPhyML on outputed data def runIgPhyML(outfile,", "succeeds \"\"\" if r.clone is None: printError(\"Cannot export datasets until sequences are clustered", "keys[i] kj = keys[j] if meta_data is None: ski = keys[i] skj =", "+ nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45]", "sequence. clones (list): list of receptors. 
cloneseqs (list): list of masked clone sequences.", "receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap =", "deal with the fact that it's possible to start mid-codon qi,spos = correctMidCodonStart(scodons,", "1 qpos += 1 while spos < s_end and scodons[spos] == \"...\": #possible", "as e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building failed\") log =", "r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1 for r in", "exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\")", "(list): receptor objects within the same clone. sequences (list): sequences within the same", "\"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile +", "os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog):", "both are the same, move both forward spos += 1 qpos += 1", "% key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] > 0: germ_id", "over in the #alignment if scodons[i][2:3] != qi[2:3] or scodons[i + 1] !=", "= time() printMessage(\"Correcting frames and indels of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes", "num in useqs_f.items(): seq = seq_f cid = \"\" if meta_data is not", "only 1 sequence? imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies =", "printError(\"number of sites must be divisible by 3! 
len: %d, clone: %s ,", "from metadata md = md.replace(\":\",\"-\") #remove colons from metadata md = md.replace(\",\",\"-\") #remove", "sequences into clones first and then predict germlines using --cloned\")) if sites >", "regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"]", "append (str): column name to append to sequence_id igphyml (bool): If True, run", "output field help message fields = dedent( \"\"\" output files: <folder> folder containing", "= \"\" if meta_data is not None: meta_data_list = [] for m in", "logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] =", "open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] =", "log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note: Collapse can give", "seq.replace(\".\", \"-\"))) else: for j in range(0, nseqs): cid = \"\" if meta_data", "== \"DUPCOUNT\": cid = delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\"", "four containing a list of IMGT positions for first sequence in clones, the", "log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if", "oformat == \"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for", "Returns: argparse.ArgumentParser: argument parsers. 
\"\"\" # Define input and output field help message", "estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which", "fail files. \"\"\" # Print parameter info log = OrderedDict() log[\"START\"] = \"BuildTrees\"", "meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt) if collapse: return len(useqs_f)", "masked clone sequences. logs (dict): contains log information for each sequence. fails (dict):", "si[i] if si[i] != \".\": ndotgaps.append(0) #find any gaps not divisible by three", "to mask the #first codon in the IMGT seq, other times it will", "fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run", ": list of receptor objects. collapse (bool) : deduplicate sequences. nseqs (int): number", "\"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not", "+= maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask = not nmask) if", "clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt) if collapse: return len(useqs_f) else:", "position %d\" % (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\" in log:", "= i break elif scodons[i][0] == \".\": scodons[i] = \"N\" + scodons[i][1:3] if", "getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out =", "range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j])", "kj else: ncj = 0 if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if", "for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", 
\"ce\", \"e,e\", \"ce,e\",", "+= 1 else: init_clone_sizes[r.clone] = 1 for r in all_records: if target_clones is", "log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id]", "of data sequences. Any clones with fewer than the specified number of sequences", "len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen for", "\"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons split by indels start_time", "fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment and partition files for IgPhyML", "log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq", "regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] +", "= None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __", "0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] = newgerm[-1] + \"N\"", "run CDR/FWR partitioned model on this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\",", "= keys[i].split(delim) skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist", "p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94", "3): if sequence[i:(i+3)] in ptcs: return i return -1 def rmCDR3(sequences, clones): \"\"\"", "recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos >= s_end or", "\"NN\" + qi spos = i 
break elif scodons[i][0] == \".\": scodons[i] =", "if sites % 3 != 0: printError(\"number of sites must be divisible by", "maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar)", "clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs =", "for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are not the", "to sequence IDs. Splits identical sequences with different meta_data collapse (bool): if True", "(2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\")", "\"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep) for line in todelete: line", "= 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k in clones.keys(): if len(clones[str(k)])", "= \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open output files", "0 #adjust for the fact that IMGT sequences can end on gaps for", "parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask split codons Arguments: scodons", "from running IgPhyML, if specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment,", "return 0 if r.functional and ptcs < 0: #If IMGT regions are provided,", "log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq =", "= regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"]", 
"%d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\"", "len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep", "debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position %d\" %", "len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) )", "i == 0 and curgap != 0: if curgap % 3 != 0", "txt will delete all tree file results.\\n\" \"You'll have to do that yourself.\")", "as clonef: if collapse: for seq_f, num in useqs_f.items(): seq = seq_f cid", "(\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR respectively:", "deduplicated receptors within a clone. \"\"\" keys = list(useqs.keys()) join = {} #", "else: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False", "if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone]", "germ_id = [\"GERM\"] if meta_data is not None: for i in range(1, len(meta_data)):", "asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons split by alignment to IMGT reference, then", "indels of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes = {} big_enough = []", "\"\"\" Checks whether a frameshift occured in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor", "there are differences, e.g. 
if reconstruction was done before clonal clustering resolveglines =", "id -> number ATCG nucleotides for i in range(0,len(keys)-1): for j in range(i+1,len(keys)):", "receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log,", "elif i == 0 and curgap != 0: if curgap % 3 !=", "lineage trees from running IgPhyML, if specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask", "* len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) +", "str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output fasta file of masked, concatenated", "joined sequences and collapse keys = list(useqs.keys()) for k in keys: if useqs[k]", "# Print parameter info log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file)", "minimum number of data sequences to include. Returns: int: number of clones. \"\"\"", "ros receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\" % ins,", "this was due to a frame-shift by repeating this method but with an", "at position %d, at end of subject sequence\" % (scodons[ospos], ospos), debug) if", "str((sequences[i])) ) # print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)): if imgtar[j] !=", "-len(conseqs) # Output fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones,", "len(lcodon) == 1: newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3: ngerm = []", "start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log =", "scodons[s_end]), debug) s_end += 1 qpos = 0 if mask: findAndMask(receptor, scodons, qcodons,", "imgt[j] != 108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 +=", "\"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, 
\"passreads\":0, \"failreads\":0} # Mask codons split by indels", "on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\",", "return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if", "import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask", "not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] =", "1: curgap += 1 elif i == 0 and curgap != 0: if", "break return dist def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences", "of the first receptor in clones, the length of the first sequence in", "+ qi spos = i break elif scodons[i][0] == \".\": scodons[i] = \"N\"", "%s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f,", "# remove gap only sites from observed data newgerm = [] imgt =", "collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim) if collapse and len(useqs_f) <", "it was preserved qpos -= 1 spos = ospos printDebug(\"But codon was apparently", "mask split codons sample_depth (int): depth of subsampling before deduplication min_seq (int): minimum", "i in scodons: concatenated_seq += i # add --- gaps back to IMGT", "imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen for j in range(0,len(sequences)):", "tree file results.\\n\" \"You'll have to do that yourself.\") log = OrderedDict() log[\"END\"]", "information if some sequences have ambiguous characters at polymorphic sites def buildTrees(db_file, meta_data=None,", "in ndotgaps: if i == 1: 
curgap += 1 elif i == 0", "joinseqs = {} # id -> useq to join with (least ambiguous chars)", "== \"...\": #possible next codon is just a gap spos += 1 while", "os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] >", "= ospos printDebug(\"But codon was apparently preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"]", "Run IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\",", "= len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences", "object. min_seq (int): minimum number of data sequences to include. Returns: int: number", "debug) scodons[ospos] = \"NNN\" if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\"", "#adjust for the fact that IMGT sequences can end on gaps for i", "\"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\",", "'<') printError(\"GY94 tree building in IgPhyML failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML", "%s.\" % format) out_fields = getDbFields(db_file, reader=reader) # open input file handle =", "s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if spos >= s_end: printDebug(\"Masked", "match for the adjacent codon was found, something\"s up. 
log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure", "position %d %d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos += 1 qpos", "sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes = {} big_enough = [] all_records =", "= OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] =", "found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id", "optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out]", "keys[i].split(delim) skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist =", "of clonal positions . \"\"\" # bootstrap these data if desired lg =", "sample_depth: break # Start processing clones clonesizes = {} pass_count, nclones = 0,", "\"\": ngermline = c.getField(\"germline_imgt\") if ngermline != germline: resolveglines = True if resolveglines:", "if sequence[i:(i+3)] in ptcs: return i return -1 def rmCDR3(sequences, clones): \"\"\" Remove", "to include. Returns: int: number of clones. \"\"\" s = \"\" delim =", "len(all_records) - len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences found that match specified", "+= rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True", "+ ncon_seq) if i == 1: ncon_seq = ncon_seq + \".\" elif i", "printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug) mout =", "in clones. clones (list): list of Receptor objects. 
\"\"\" for i in range(0,len(sequences)):", "concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"]", "= clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1", "start_time = time() printMessage(\"Correcting frames and indels of sequences\", start_time=start_time, width=50) #subsampling loop", "help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to", "of information for each sequence debug (bool): print debugging statements? recursive (bool): was", "cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] =", "cid = \"\" if meta_data is not None: meta_data_list = [] for m", "log_handle = open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key", "imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for i", "%s at position %d, at end of subject sequence\" % (scodons[ospos], ospos), debug)", "i in range(0, sites, 3): if i == 0: newseqs.append([]) if tallies[i//3] >", "excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to be subsampled (before", "[108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt:", "+= 1 else: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"]", "def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, 
optimization=\"lr\", omega=\"e,e\",", "\"fail\": None} if pass_handle is not None: output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle", "c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar):", "!= 0: if curgap % 3 != 0 : printDebug(\"Frame-shifting gap detected! Refusing", "if pass_handle is not None: output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle is not", "= log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return 0", "at position %d, but couldn't find upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False", "= getArgParser() checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call", "after first difference found? Returns: int: number of ACGT differences. \"\"\" if len(seq1)", "range(0, nseqs): if sequences[j][i:(i + 3)] != \"...\": tally += 1 tallies.append(tally) newseqs", "= estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa", "- 1 + oqpos*3 pisite = ospos * 3 if (psite + 3", "number of data sequences to include. Returns: int: number of clones. 
\"\"\" s", "ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]]", "if imgtar[j] != 108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3", "join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is", "out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name, __", "colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id", "(list): list of masked clone sequences. logs (dict): contains log information for each", "= fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] =", "logs (dict): contains log information for each sequence. fails (dict): counts of various", "sequences with different meta_data. Returns: tuple: tuple of length four containing a list", "\" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)],", "clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline", "\"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness,", "fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on outputted data? 
if", "1 else: # if not the same, mask IMGT at that site and", "range(i+1,len(keys)): ki = keys[i] kj = keys[j] if meta_data is None: ski =", "si[i] != \".\": ndotgaps.append(0) #find any gaps not divisible by three curgap =", "0: #If IMGT regions are provided, record their positions rd = RegionDefinition(r.junction_length, amino_acid=False)", "handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key]))", "nohlp (bool): If True, only estimate GY94 trees and parameters clean (str): delete", "Returns: int: number of ACGT differences. \"\"\" if len(seq1) != len(seq2): printError(\"Sequences are", "kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines the ArgumentParser", "newgerm[-1] + \"NN\" if ncdr3: ngerm = [] nimgt = [] for i", "in range(0, len(newseqs)): nseq = [] ncdr3 = 0 for j in range(0,", "dist == 0 and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") +", "\" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches between IMGT", "for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(conseqs))", "if ngermline != germline: resolveglines = True if resolveglines: printError(\"%s %s\" % (\"Predicted", "!= r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] =", "arguments and calls main \"\"\" # Parse command line arguments parser = getArgParser()", "= igphyml_out if oformat == \"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals", "sequence in IMGT sequence log (dict): log of information for each sequence debug", "\"...\": #possible next codon is just a gap spos += 1 while qpos", "negative if not PTCs, position of PTC if found. 
\"\"\" ptcs = (\"TAA\",", "can end on gaps for i in range(spos, len(scodons)): if scodons[i] != \"...\"", "= [] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f", "imgt): \"\"\" Create intermediate sequence alignment and partition files for IgPhyML output Arguments:", "file needs to be corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]:", "log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap = 0", "end on gaps for i in range(spos, len(scodons)): if scodons[i] != \"...\" and", "scodons[spos]: scodons[ospos] = \"NNN\" if spos >= s_end: printDebug(\"Masked %s at position %d,", "printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file", "seq1[i] != seq2[i]: dist += 1 if fbreak: break return dist def deduplicate(useqs,", "None): for m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas", "results.\\n\" \"You'll have to do that yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML", "if meta_data is not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id)))", "str(spos) elif qpos >= len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"]", "codon receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite + 3 + ins):] receptor.sequence_imgt", "= keys[j] if meta_data is None: ski = keys[i] skj = keys[j] else:", "\"<NAME>\" from changeo import __version__, __date__ # Imports import os import random import", "will be collapse to. 
if ncountj > ncounti: nci = 0 if useqs[ki]", "within the same clone. sequences (list): sequences within the same clone (share indexes", "is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f =", "data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False,", "fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"]", "identical sequences. ncdr3 (bool): if True remove CDR3 logs (dict): contains log information", "igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\"", "ris[0:(pisite + 3)] + ris[(pisite + 3):] # Debug sequence modifications printDebug(ros, debug)", "regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"])", "# Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group =", "occured in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
oqpos (int) : position", "in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if scodons[i] != \"...\":", "\"...\" and len(scodons[i]) == 3 and scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\"", "% (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos += 1 qpos += 1 while", "list(useqs.keys()) for k in keys: if useqs[k] in join: rfrom = receptors[useqs[k]] rto", "r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence", "omega optimization in IgPhyML (--omega) kappa (str): kappa optimization in IgPhyML (-t) motifs", "confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\",", "out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {} clones =", "if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"]", "first receptor in clones, the length of the first sequence in clones, and", "debug) qpos += 1 if qcodons[qpos-1] == scodons[ospos]: #if codon in previous position", "md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\")\",\"-\") #remove parenthesis from", "format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1,", "+ [45] * len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) + [80] *", "to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\",", 
"\"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data is not None: meta_data_list = []", "c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are not the same within clone %d!\\n\" %", "% (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in range(0, nseqs): cid = \"\"", "for output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. min_seq (int): minimum number", "fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs =", "Collapses identical sequences Argument: useqs (dict): unique sequences within a clone. maps sequence", "IMGT labels within a clone Arguments: sequences (list): list of sequences in clones.", "attempt to mask split codons sample_depth (int): depth of subsampling before deduplication min_seq", "j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim) if collapse", "concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return", "\" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1] ==", "+ 3):] # Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt,", "if desired lg = len(newgerm) sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \",", "leave all intermediate files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str,", "\"\"\" spos = 0 for i in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i],", "ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\": matches += 1 m_match = (matches", "#remove commas from sequence ID 
r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID", "parallelize IgPhyML across optimization (str): Optimize combination of topology (t) branch lengths (l)", "OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0,", "to append to sequence IDs. Splits identical sequences with different meta_data. Returns: tuple:", "files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t) branch", "qpos += 1 spos += 1 else: # if not the same, mask", "for k in keys: if useqs[k] in join: rfrom = receptors[useqs[k]] rto =", "all_records = [] found_no_funct = False for r in records: if r.functional is", "0: nclones += 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -=", "= [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108]", "from presto.IO import printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults import default_format from", "lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with", "recursive=False, mask=True): \"\"\" Identify junction region by IMGT definition. 
Arguments: receptor (changeo.Receptor.Receptor): Receptor", "default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser if __name__ == \"__main__\": \"\"\"", "+= 1 fails[\"germlineptc\"] += 1 return 0 if r.functional and ptcs < 0:", "= subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree", "optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness", "* len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs =", "[] all_records = [] found_no_funct = False for r in records: if r.functional", "small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] =", "to start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i + 3]", "igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\",", "import random import subprocess import multiprocessing as mp from argparse import ArgumentParser from", "3)] frameshifts = 0 s_end = 0 #adjust for the fact that IMGT", "debug = False qi = receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id", "len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff))", "of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs,", "else: scodons[-1] = \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\"", "ce = estimate + confidence interval\"\"\") 
igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"),", "OrderedDict() conseqs = [] for j in range(0, nseqs): conseq = \"\".join([str(seq_rec) for", "log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id", "length! %s %s\" % (seq1, seq2)) dist = 0 for i in range(0,len(seq1)):", "to. if ncountj > ncounti: nci = 0 if useqs[ki] in join: nci", "Characterize potential mismatches between IMGT labels within a clone Arguments: sequences (list): list", "\"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos] != \"...\": #if IMGT", "duplicate, imgt) if collapse: return len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones, cloneseqs,", "init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1 for r in all_records: if", "= nsi + si[i] if si[i] != \".\": ndotgaps.append(0) #find any gaps not", "files. useqs_f (dict): unique sequences mapped to ids. meta_data (str): Field to append", "group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing metadata to include in", "clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\")", "def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate", "the same clone. 
sequences (list): sequences within the same clone (share indexes with", "big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough) == 0:", "codon in the IMGT seq, other times it will be legitimately absent from", "if not the same, mask IMGT at that site and scan forward until", "os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\":", "+ ros[(psite + 3 + ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite", "germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for i in", "meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid", "cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in range(0, nseqs): cid", "= \"NN\" + scodons[i][2] #sometimes IMGT will just cut off first letter if", "nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\"", "log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the", "else: printDebug(\"Masked %s at position %d, but couldn't find upstream match\" % (scodons[ospos],", "\"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] +=", "a particular sequence. clones (list): list of receptors. cloneseqs (list): list of masked", "% (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition", "processing failures. out_args (dict): arguments for output preferences. 
fail_writer (changeo.IO.TSVWriter): failed sequences writer", "if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] =", "= {} pass_count, nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k", "dist = unAmbigDist(ski, skj, True) m_match = True if meta_data is not None:", "fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") #", "preserved qpos -= 1 spos = ospos printDebug(\"But codon was apparently preserved\", debug)", "regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt", "in clones, the length of the first sequence in clones, and the number", "os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as clonef: if collapse: for seq_f,", "if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING", ": position of interest in IMGT sequence. log (dict) : log of information", "str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for i in scodons: concatenated_seq", "+= 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1", "germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False, log=True,", "tab-delimited database file. meta_data (str): Field to append to sequence IDs. 
Splits identical", "sure to cluster sequences into clones first and then predict germlines using --cloned\"))", "until sequences are clustered into clones.\") if r.dupcount is None: r.dupcount = 1", "is None: ski = keys[i] skj = keys[j] else: ski, cid = keys[i].split(delim)", "dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\",", "== \"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT will just cut off", "ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match =", "%s\" % (scodons[i], qi[0:3]), debug) if scodons[i] != \"...\": if scodons[i][0:2] == \"..\":", "log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep)", "parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified,", "0: printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False", "getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\" % format) out_fields = getDbFields(db_file, reader=reader) #", "of PTC if found. \"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\",", "= True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \"", "receptor objects. collapse (bool) : deduplicate sequences. nseqs (int): number of sequences. 
delim", "intermediate files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"),", "main \"\"\" # Parse command line arguments parser = getArgParser() checkArgs(parser) args =", "(dict): arguments for output preferences. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. Returns: 0:", "big_enough: if r.functional is None: r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL", "argparse.ArgumentParser: argument parsers. \"\"\" # Define input and output field help message fields", "r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] =", "#if len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i]))", "nohlp (bool): If True, only estimate GY94 trees and parameters format (str): input", "log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if", "+ scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1] == scodons[ospos]:", "clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output", "%s at position %d, but couldn't find upstream match\" % (scodons[ospos], ospos), debug)", "import ArgumentParser from collections import OrderedDict from textwrap import dedent from time import", "if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline))", "all with --oformat txt will delete all tree file results.\\n\" \"You'll have to", "3 != 0: printError(\"number of 
sites must be divisible by 3! len: %d,", "codon, it was preserved qpos -= 1 spos = ospos printDebug(\"But codon was", "getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser,", "1] != qi[3:6]: qi = \"NN\" + qi spos = i break elif", "pisite = ospos * 3 if (psite + 3 + ins) < len(ros)", "\"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\",", "qcodons[qpos]: # if both are the same, move both forward spos += 1", "spos >= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if spos >=", "\"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\",", "j in range(0, nseqs): cid = \"\" if meta_data is not None: meta_data_list", "type=int, default=1, help=\"\"\"Minimum number of data sequences. Any clones with fewer than the", "building failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\": igf", "position %d, but couldn't find upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos)", "kappa (str): kappa optimization in IgPhyML (-t) motifs (str): motifs to use in", "- 1):] #tally where --- gaps are in IMGT sequence and remove them", "records. lineages-fail database records failed processing. 
igphyml-pass parameter estimates and lineage trees from", "receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap = 0 si = nsi scodons =", "+ confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\")", "os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" % (2,", "(--hotness) oformat (str): output format for IgPhyML (tab or txt) clean (str): delete", "+ str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)],", "meta_data is not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for", "writer object. Returns: 0: returns 0 if an error occurs or masking fails.", "default=1, help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\",", "\"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons)) else:", "str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data,", "in the same clone.\", \"Be sure to cluster sequences into clones first and", "= False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region", "clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return 1 else: if", "type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), 
help=\"\"\"Optimize combination of topology (t) branch lengths (l) and parameters", "while qpos < len(qcodons) and spos < s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking", "IgPhyML Arguments: db_file (str): input tab-delimited database file. meta_data (str): Field to append", "+= 1 while qpos < len(qcodons) and spos < s_end and scodons[spos] !=", "% (\"Predicted germlines are not the same among sequences in the same clone.\",", "shift the frame. This attempts to correct for this by looking at the", "object. recursive (bool) : was this method part of a recursive call? mask", "# this algorithm depends on the fact that all sequences are compared pairwise,", "log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] =", "Create intermediate sequence alignment and partition files for IgPhyML output Arguments: clones (list):", "!= \"...\": tally += 1 tallies.append(tally) newseqs = [] # remove gap only", "m_match = True if meta_data is not None: matches = 0 for m", "lsplit = line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile)", "log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"] = str(spos) else:", "(bool): if True collapse identical sequences. 
ncdr3 (bool): if True remove CDR3 logs", "os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if", "(scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"]", "of subject sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] =", "frames and indels of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes = {} big_enough", "init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if", "(list) : list of receptor objects. collapse (bool) : deduplicate sequences. nseqs (int):", "> (len(germline)): seqdiff = sites - len(germline) germline = germline + \"N\" *", "useqs def hasPTC(sequence): \"\"\" Determines whether a PTC exits in a sequence Arguments:", "consensus sequences. duplicate (bool) : duplicate sequence if only one in a clone.", "not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f,", "up. log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due to a frame-shift", "in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"] = str(spos)", "IgPhyML (tab or txt) clean (str): delete intermediate files? (none, all) nohlp (bool):", "in a clone. imgt (list) : IMGT numbering of clonal positions . \"\"\"", "0 else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence columns not detected.\\n! 
Cannot run", "assignments are not the same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" %", "to IMGT reference, then produces input files for IgPhyML Arguments: db_file (str): input", "True if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True all_records.append(r)", "% (scodons[i], qi[0:3]), debug) if scodons[i] != \"...\": if scodons[i][0:2] == \"..\": scodons[i]", "ki = keys[i] kj = keys[j] if meta_data is None: ski = keys[i]", "joinseqs[kj] = ki # loop through list of joined sequences and collapse keys", "meta_data collapse (bool): if True collapse identical sequences. ncdr3 (bool): if True remove", "False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] =", "+ regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) +", "#print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if", "+= 1 spos += 1 else: # if not the same, mask IMGT", "statements. Returns: tuple: (modified input sequence, modified starting position of IMGT sequence in", "len(scodons)): if scodons[i] != \"...\" and len(scodons[i]) == 3 and scodons[i] != \"NNN\":", "in range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs: return i return -1 def", "e.g. if reconstruction was done before clonal clustering resolveglines = False for c", "sequences to include. Returns: int: number of clones. 
\"\"\" s = \"\" delim", "ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"),", "potential mismatches between IMGT labels within a clone Arguments: sequences (list): list of", "c in clones: if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)):", "parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on", "+= checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos >= s_end or qcodons[qpos] !=", "= len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if", "len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or", "= True if correctseqs: maxlen = sites maximgt = len(imgtar) for j in", "interest in IMGT sequence. log (dict) : log of information for each sequence.", "if ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki # loop through", "= kj else: ncj = 0 if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]]", "#possible next codon is just a gap spos += 1 while qpos <", "if scodons[-1] == \"..\" or scodons[-1] == \".\": scodons[-1] = \"...\" else: scodons[-1]", "subject sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"]", "lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon)", "is not None: if append is not None: for m in append: r.sequence_id", "specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data sequences. 
Any clones", "frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False): \"\"\" Find", "will delete all tree file results.\\n\" \"You'll have to do that yourself.\") log", "= reader(handle) fail_handle, fail_writer = None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\",", "hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\",", "+ regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt)", "c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if there are", "in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"] = str(spos)", "j in range(0, nseqs): for i in range(0, sites, 3): if i ==", "to shift the frame. This attempts to correct for this by looking at", "\"\" if meta_data is not None: meta_data_list = [] for m in meta_data:", "the fact that IMGT sequences can end on gaps for i in range(spos,", "meta_data. Returns: tuple: tuple of length four containing a list of IMGT positions", "printLog(log) if not nohlp: try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError", "germline = germline + \"N\" * (seqdiff) if sites % 3 != 0:", "them for now gaps = [] ndotgaps = [] nsi = \"\" for", "range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean != \"none\": log = OrderedDict() log[\"START\"]", "for i in range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs: return i return", "within the same clone (share indexes with clones parameter). 
meta_data (str): Field to", "the first receptor in clones, the length of the first sequence in clones,", "action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\",", "\"\"\" if r.clone is None: printError(\"Cannot export datasets until sequences are clustered into", "clean (str): delete intermediate files? (none, all) nohlp (bool): If True, only estimate", "frameshifts = 0 s_end = 0 #adjust for the fact that IMGT sequences", "length of the first sequence in clones, and the number of sequences in", "min_seq (int): minimum number of data sequences to include. Returns: int: number of", "< min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \"", "list of Receptor objects. \"\"\" for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline", "clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and duplicate: if meta_data", "md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\":\",\"-\") #remove colons from", "#imgt_warn = \"\\n! IMGT FWR/CDR sequence columns not detected.\\n! 
Cannot run CDR/FWR partitioned", "now gaps = [] ndotgaps = [] nsi = \"\" for i in", "clones with fewer than the specified number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\",", "and parameters (r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\",", "\"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons split by indels start_time =", "nseqs): for i in range(0, sites, 3): if i == 0: newseqs.append([]) if", "Collapse can give misleading dupcount information if some sequences have ambiguous characters at", "init_clone_sizes[r.clone] = 1 for r in all_records: if target_clones is None or r.clone", "+ \".\" elif i == 0: ncon_seq = ncon_seq + concatenated_seq[counter] counter +=", "mask (bool) : mask split codons for use with igphyml? Returns: str: modified", "IMGT sequence qcodons (list): list of codons in input sequence spos (int): starting", "and output field help message fields = dedent( \"\"\" output files: <folder> folder", "out if this was due to a frame-shift by repeating this method but", "parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for each input file", "remove all CDR3s. nmask (bool): if False, do not attempt to mask split", "+ str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches between IMGT labels", "the same within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar,", "number of sequences in clones. \"\"\" sites = len(sequences[0]) nseqs = len(sequences) imgtar", "len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if correctseqs: maxlen = sites maximgt =", "various sequence processing failures. out_args (dict): arguments for output preferences. 
fail_writer (changeo.IO.TSVWriter): failed", "scodons[-1] = \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" +", "= os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key]", "i == 1: ncon_seq = ncon_seq + \".\" elif i == 0: ncon_seq", "germlines using --cloned\")) if sites > (len(germline)): seqdiff = sites - len(germline) germline", "[13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] *", "\"NNN\" if spos >= s_end: printDebug(\"Masked %s at position %d, at end of", "Field to append to sequence IDs. Splits identical sequences with different meta_data collapse", "masking succeeds \"\"\" if r.clone is None: printError(\"Cannot export datasets until sequences are", "import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug):", "!= \".\": if seq2[i] != \"N\" and seq2[i] != \"-\" and seq2[i] !=", "len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is", "+ ins) < len(ros) and (pisite + 3) < len(ris): #cut out 1", "hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log) try: #check", "of interest in input sequence. ospos (int) : position of interest in IMGT", "log information for each sequence out_dir (str): directory for output files. 
fail_writer (changeo.IO.TSVWriter):", "igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if clean != \"none\":", "j in range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data", "in range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data is", "%s at position %d %d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos +=", "#If IMGT regions are provided, record their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions", "= str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True):", "3 + ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite + 3):] #", "= RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"]", "omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr", "out_args[\"out_name\"] if dir_name is None: clone_dir = clone_name else: clone_dir = os.path.join(dir_name, clone_name)", "r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log = OrderedDict()", ": print debugging statements. \"\"\" frameshifts = 0 for ins in range(1, 3):", "\"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR respectively: e =", "(dict): counts of various sequence processing failures. 
out_args (dict): arguments for output preferences.", "s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if no match for the adjacent", "= False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] =", "log: dict of sequence information \"\"\" debug = False qi = receptor.sequence_input si", "will just cut off first letter if non-match, at which point we\"ll just", "= \"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"]", "not detected.\\n! Cannot run CDR/FWR partitioned model on this data.\\n\" imgtpartlabels = [0]", "scodons[i] != \"...\": if scodons[i][0:2] == \"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes", "(0-1).\"\"\") return parser if __name__ == \"__main__\": \"\"\" Parses command line arguments and", "sequences can end on gaps for i in range(spos, len(scodons)): if scodons[i] !=", "metadata md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\")\",\"-\") #remove parenthesis", "sequence s_end (int): end of IMGT sequence qpos (int): starting position of input", "Refusing to include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"]", "0 and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj", "= [] ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i])))", "igphyml_out = None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name,", "in range(0, len(sequences)): i = sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) !=", "file results.\\n\" \"You'll have to do that yourself.\") log = OrderedDict() log[\"END\"] =", "logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt 
fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 else: #imgt_warn", "in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos)", "total == sample_depth: break # Start processing clones clonesizes = {} pass_count, nclones", "help=\"\"\"Kappa parameters to estimate: e = estimate, ce = estimate + confidence interval\"\"\")", "\\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] +", "= \"NN\" + qi spos = i break elif scodons[i][0] == \".\": scodons[i]", "sequences Argument: useqs (dict): unique sequences within a clone. maps sequence to index", "append to sequence_id igphyml (bool): If True, run IgPhyML on outputted data nproc", "tally += 1 tallies.append(tally) newseqs = [] # remove gap only sites from", "else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r)", "== 0: ncon_seq = ncon_seq + concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq", "--cloned\")) if sites > (len(germline)): seqdiff = sites - len(germline) germline = germline", "useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate", "\"-\"))) if len(useqs_f) == 1 and duplicate: if meta_data is not None: if", "#if IMGT gap, move forward in imgt spos += 1 elif scodons[spos] ==", "str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def", "mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq) if ptcs >=", "printError(\"igphyml not found :-/\") try: #get GY94 starting topologies p = subprocess.check_output(gy_args) except", "[] ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline:", "meta_data[0].split(\",\") for c in clones: if meta_data is not None: 
c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m", "in useqs dict). log (collections.OrderedDict): log of sequence errors. meta_data (str): Field to", "ncounti: nci = 0 if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if nci", "igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return", "log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False", "True collapse identical sequences. ncdr3 (bool): if True remove all CDR3s. nmask (bool):", "fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] +=", "each sequence out_dir (str): directory for output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer", "len(si), 3)] # deal with the fact that it's possible to start mid-codon", "A,T,C,Gs Arguments: seq1 (str): sequence 1 seq2 (str): sequence 2 fbreak (bool): break", "+= 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def", "+= 1 fails[\"region_fail\"] += 1 return 0 else: #imgt_warn = \"\\n! 
IMGT FWR/CDR", "Determines whether a PTC exits in a sequence Arguments: sequence (str): IMGT gapped", "oformat, \"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"]", "intermediate sequence alignment and partition files for IgPhyML output Arguments: out_dir (str): directory", "nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers.", "== \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1 else: log", "you find a codon that matches next site if debug: print(\"checking %s at", "= getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\" % format) out_fields = getDbFields(db_file, reader=reader)", "False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas", "of sequences in clones. clones (list): list of Receptor objects. \"\"\" for i", "log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at", "in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar =", "modified starting position of IMGT sequence in input sequence). \"\"\" spos = 0", "print('error>', e.output, '<') printError(\"GY94 tree building in IgPhyML failed\") log = OrderedDict() log[\"START\"]", "def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask split codons Arguments: scodons (list):", "region by IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
recursive (bool) : was", "for i in range(spos, len(scodons)): if scodons[i] != \"...\" and len(scodons[i]) == 3", "logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\"", "0 for r in big_enough: if r.functional is None: r.functional = True if", "is not None: if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid", "debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt", "len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\"", "1 while qpos < len(qcodons) and spos < s_end and scodons[spos] != qcodons[qpos]:", "if(meta_data is not None): for m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md =", "action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data sequences. Any clones with fewer", "of sequences in clones. 
\"\"\" sites = len(sequences[0]) nseqs = len(sequences) imgtar =", "!= \"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end +=", "joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id del useqs[k]", "range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir, \"%s.part.txt\"", "+ regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"]", "if si[i] != \".\": ndotgaps.append(0) #find any gaps not divisible by three curgap", "conseqs.append(conseq) if collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim) if collapse and", "join: ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] =", "\"TRR\") for i in range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs: return i", "we\"ll just want to mask the #first codon in the IMGT seq, other", "clone Arguments: sequences (list): list of sequences in clones. clones (list): list of", "oqpos=qpos spos += 1 qpos += 1 while spos < s_end and scodons[spos]", "i # add --- gaps back to IMGT sequence ncon_seq = \"\" counter", "\\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt !=", "partition file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as partf:", "getArgParser() checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call main", "Argument: useqs (dict): unique sequences within a clone. 
maps sequence to index in", "i == 0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for", "in keys: if useqs[k] in join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount", "= r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] =", "= OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not None else None log[\"RECORDS\"]", "misleading dupcount information if some sequences have ambiguous characters at polymorphic sites def", "Returns: tuple: tuple of length four containing a list of IMGT positions for", "hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons split by alignment", "= [si[i:i + 3] for i in range(0, len(si), 3)] # deal with", "+= 1 elif qcodons[qpos] == \"N\": # possible that SEQ-IMGT ends on a", "= sites - len(germline) germline = germline + \"N\" * (seqdiff) if sites", "in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1 for r in all_records:", "# id -> useq to join with (least ambiguous chars) ambigchar = {}", "for the fact that IMGT sequences can end on gaps for i in", "% (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and duplicate: if meta_data is", "clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"]", "= ncountj # this algorithm depends on the fact that all sequences are", "fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return 0 # Run IgPhyML on outputed", "tuple: tuple of length four containing a list of IMGT positions for first", "(str): sequence 2 fbreak (bool): break after first difference found? 
Returns: int: number", "= receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start - 1 + oqpos*3 pisite", "of the first sequence in clones, and the number of sequences in clones.", "subsampling before deduplication min_seq (int): minimum number of sequences per clone append (str):", "(len(germline)): seqdiff = sites - len(germline) germline = germline + \"N\" * (seqdiff)", "log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"]", "masking fails. 1: returns 1 masking succeeds \"\"\" if r.clone is None: printError(\"Cannot", "in frame 1. Returns: int: negative if not PTCs, position of PTC if", "= \"NNN\" if spos >= s_end: printDebug(\"Masked %s at position %d, at end", "rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and", "clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data", "\"\"\" s = \"\" delim = \"_\" duplicate = True # duplicate sequences", "to analyze. collapse (bool): if True collapse identical sequences. ncdr3 (bool): if True", "sites must be divisible by 3! len: %d, clone: %s , id: %s,", "len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"]", "+= 1 tallies.append(tally) newseqs = [] # remove gap only sites from observed", "args_dict[\"db_files\"] # Call main for each input file for f in args.__dict__[\"db_files\"]: args_dict[\"db_file\"]", "0 for i in ndotgaps: if i == 1: curgap += 1 elif", "\":\" + ncon_seq) if i == 1: ncon_seq = ncon_seq + \".\" elif", "(dict): unique sequences within a clone. 
maps sequence to index in Receptor list.", "if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt =", "clones (list): list of receptors. cloneseqs (list): list of masked clone sequences. logs", "1 printDebug(\"FRAMESHIFT of length %d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\"", "codon was apparently preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] +", "dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name, __ = splitName(db_file)", "for each clone. lineages successfully processed records. lineages-fail database records failed processing. igphyml-pass", "and len(conseqs) < min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too", "use in IgPhyML (--motifs) hotness (str): motif in IgPhyML (--hotness) oformat (str): output", "gaps: #print(str(i) + \":\" + ncon_seq) if i == 1: ncon_seq = ncon_seq", "> maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt", "= omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log) if", "the fact that all sequences are compared pairwise, and all are zero #", "--clean all with --oformat txt will delete all tree file results.\\n\" \"You'll have", "log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not None else None", "i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output", "IDs. Splits identical sequences with different meta_data. 
delim (str): delimited to use when", "\".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile,", "seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"]", "\"\"\" # Info __author__ = \"<NAME>\" from changeo import __version__, __date__ # Imports", "\"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"] =", "else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor,", "cut off first letter if non-match, at which point we\"ll just want to", "r.setField(meta_data[m],md) if append is not None: if append is not None: for m", "r.functional and ptcs < 0: #If IMGT regions are provided, record their positions", "\"-\" and seq2[i] != \".\": if seq1[i] != seq2[i]: dist += 1 if", "\"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm)))", "if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader, writer, __ = getFormatOperators(format)", "None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile))", "clone IDs to analyze. collapse (bool): if True collapse identical sequences. ncdr3 (bool):", "in IMGT sequence qcodons (list): list of codons in input sequence spos (int):", "nohlp: try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\"", "be divisible by 3! 
len: %d, clone: %s , id: %s, seq: %s\"", "= parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for each input", "threads to parallelize IgPhyML across optimization (str): Optimize combination of topology (t) branch", "size is returned as a negative if clonesizes[str(k)] > 0: nclones += 1", "seqdiff = sites - len(germline) germline = germline + \"N\" * (seqdiff) if", "nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False,", "= unAmbigDist(ski, skj, True) m_match = True if meta_data is not None: matches", "delimited to use when appending meta_data. Returns: list: deduplicated receptors within a clone.", "downstream of offending codon receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite + 3", "= \"Collapsed with \" + rto.sequence_id del useqs[k] return useqs def hasPTC(sequence): \"\"\"", "< len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) ) #", "useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] = useqs[ki]", "== 3 and scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons),", "processing. 
igphyml-pass parameter estimates and lineage trees from running IgPhyML, if specified required", "Arguments: seq1 (str): sequence 1 seq2 (str): sequence 2 fbreak (bool): break after", "\"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if", "r in records: if r.functional is None: r.functional = True if found_no_funct is", "if seq2[i] != \"N\" and seq2[i] != \"-\" and seq2[i] != \".\": if", "i in range(spos, len(scodons)): if scodons[i] != \"...\" and len(scodons[i]) == 3 and", "recursive=False): \"\"\" Find and mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. scodons", "for m in append: r.sequence_id = r.sequence_id + \"_\" + r.getField(m) total +=", "#remove parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID", "default=\"none\", help=\"\"\"Delete intermediate files? none: leave all intermediate files; all: delete all intermediate", "+ 3] for i in range(0, len(qi), 3)] frameshifts = 0 s_end =", "in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are not the same within", "out_args (dict): arguments for output preferences. 
Returns: dict: dictionary of output pass and", "0 if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]]", "cloneseqs = {} clones = {} logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0,", "fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] =", "if len(scodons[-1]) != 3: if scodons[-1] == \"..\" or scodons[-1] == \".\": scodons[-1]", "r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline", "imgt spos += 1 elif scodons[spos] == qcodons[qpos]: # if both are the", "help message fields = dedent( \"\"\" output files: <folder> folder containing fasta and", "log_handle = None if out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"], \"w\") for", "nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] = nseq newgerm = ngerm imgt =", "= fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] =", "(psite + 3 + ins) < len(ros) and (pisite + 3) < len(ris):", "+ cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and duplicate:", "Parse command line arguments parser = getArgParser() checkArgs(parser) args = parser.parse_args() args_dict =", "= False for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\":", "input and output field help message fields = dedent( \"\"\" output files: <folder>", "ospos, log, debug): \"\"\" Checks whether a frameshift occured in a sequence Arguments:", "ins, debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros", "yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note: Collapse can", 
"range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else:", "first and then predict germlines using --cloned\")) if sites > (len(germline)): seqdiff =", "clones.keys(): if len(clones[str(k)]) < min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone", "if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if collapse:", "sequences with different meta_data. delim (str): delimited to use when appending meta_data. Returns:", "outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if", "of ACGT differences. \"\"\" if len(seq1) != len(seq2): printError(\"Sequences are not the same", "delimiter for extracting metadata from ID. newgerm (str) : modified germline of clonal", "os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] > 0: germ_id = [\"GERM\"] if meta_data", "Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\"", "to append to sequence IDs. Splits identical sequences with different meta_data. meta_data (str):", "interest in input sequence. 
ospos (int) : position of interest in IMGT sequence.", "at position %d %d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos += 1", "len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or", "[mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount", "range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False", "\"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT will just cut off first", "receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start - 1 + oqpos*3 pisite =", "regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"])", "log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug: print(receptor.sequence_id) # adjust starting position of query", "these data if desired lg = len(newgerm) sites = range(0, lg) transtable =", "else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"] += r.dupcount if mout[1][\"FAIL\"] ==", "len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences found", "target_clones (str): List of clone IDs to analyze. 
collapse (bool): if True collapse", "scodons[ospos] = \"NNN\" if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" +", "\".\": scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1] !=", "i in range(0, len(newseqs)): nseq = [] ncdr3 = 0 for j in", "to original codon, it was preserved qpos -= 1 spos = ospos printDebug(\"But", "= [13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"])", "match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s", "md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\":\",\"-\") #remove colons from metadata md", "0 while spos < s_end and qpos < len(qcodons): if debug: print(scodons[spos] +", "too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)]", "scodons[i][0:2] == \"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT will just cut", "> 0: nclones += 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"]", "recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3: if scodons[-1] == \"..\" or", "def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all sequences and germline of a", "elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1", "dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction", "from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data is", "= 0 for j in range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if", "codon was found, something\"s up. 
log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was", "qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position %d\" % (scodons[ospos], ospos), debug) scodons[ospos]", "len(sequences)): i = sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs", "r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] =", "position of input sequence in IMGT sequence log (dict): log of information for", "within a clone. maps sequence to index in Receptor list. receptors (dict): receptors", "= [] nimgtar = [] ngermline = [] ncdr3 = 0 #print(\"imgtarlen: \"", "loop init_clone_sizes = {} big_enough = [] all_records = [] found_no_funct = False", "Field to append to sequence IDs. Splits identical sequences with different meta_data. meta_data", "logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return 0 if", "+ 3) < len(ris): #cut out 1 or 2 nucleotides downstream of offending", "find a codon that matches next site if debug: print(\"checking %s at position", "cluster sequences into clones first and then predict germlines using --cloned\")) if sites", "< s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos],", "[] nsi = \"\" for i in range(0,len(si)): if si[i] == \"-\": gaps.append(1)", "out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __,", "+ \",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons) and", "estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate", 
"r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence", "if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"]", "must be divisible by 3! len: %d, clone: %s , id: %s, seq:", "fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to", "from metadata md = md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if append is", "return receptor.sequence_imgt, log else: curgap = 0 si = nsi scodons = [si[i:i", "len(newseqs)): nseq = [] ncdr3 = 0 for j in range(0, len(imgt)): if", ": deduplicate sequences. nseqs (int): number of sequences. delim (str) : delimiter for", "tree building in IgPhyML failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\"", "= \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f in", "regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt +", "+ scodons[i][2] #sometimes IMGT will just cut off first letter if non-match, at", "= [] all_records = [] found_no_funct = False for r in records: if", "log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"]", "in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt)", "if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat txt will", "delim (str) : delimiter for extracting metadata from ID. 
newgerm (str) : modified", "\"-\"))) germ_id = [\"GERM\"] if meta_data is not None: for i in range(1,len(meta_data)):", "{} logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0,", "parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for each input file for f in", "= fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] =", "qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i + 3] for i in", "clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate sequence alignment", "hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log = OrderedDict() log[\"ID\"] =", "delete intermediate files? (none, all) \"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) +", "-> number ATCG nucleotides for i in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki", "to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\",", "clonesizes = {} pass_count, nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for", "subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building", "break # Start processing clones clonesizes = {} pass_count, nclones = 0, 0", "if True collapse identical sequences. ncdr3 (bool): if True remove CDR3 logs (dict):", "IMGT sequence in input sequence. debug (bool) : print debugging statements. 
Returns: tuple:", "os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat", "but couldn't find upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos]", "seq_rec in newseqs[j]]) if meta_data is not None: meta_data_list = [] for m", "executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try: #get GY94 starting topologies p", "desired lg = len(newgerm) sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\")", "print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building failed\") log = OrderedDict() log[\"OUTPUT\"]", "information for each sequence out_dir (str): directory for output files. fail_writer (changeo.IO.TSVWriter): failed", "newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon) == 1: newgerm[-1] = newgerm[-1] +", "qi[1:3] or scodons[i+1] != qi[3:6]: qi = \"N\" + qi spos = i", "oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat txt will delete", "Arguments: r (changeo.Receptor.Receptor): receptor object for a particular sequence. clones (list): list of", "format (str): input and output format. out_args (dict): arguments for output preferences. Returns:", "delim (str): delimited to use when appending meta_data. Returns: list: deduplicated receptors within", "str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\" + str((germline))) for j", "len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) + [80]", "+ kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj # this algorithm", "debug (bool) : print debugging statements. 
Returns: tuple: (modified input sequence, modified starting", "logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment and partition files for", "\"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1 else: log =", "%s , id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs", "from sequence ID if(meta_data is not None): for m in range(0,len(meta_data)): md =", "%d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos += 1 qpos += 1", "range(0, sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) ==", "for seqi in range(0, len(sequences)): i = sequences[seqi] if len(i) != sites or", "1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs =", "the number of sequences in clones. \"\"\" sites = len(sequences[0]) nseqs = len(sequences)", "by alignment to IMGT reference, then produces input files for IgPhyML Arguments: db_file", "in IgPhyML (-t) motifs (str): motifs to use in IgPhyML (--motifs) hotness (str):", "to append to sequence ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see", "= 0 for i in range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i] !=", "runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"):", "log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"]", "True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove", "None} if pass_handle is not None: output[\"pass\"] = pass_handle.name pass_handle.close() if 
fail_handle is", "= True if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True", "= \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return", "cloneseqs, logs, fails, out_args, fail_writer, mask = not nmask) if total == sample_depth:", "\"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\",", "calls main \"\"\" # Parse command line arguments parser = getArgParser() checkArgs(parser) args", "(int): Number of threads to parallelize IgPhyML across optimization (str): Optimize combination of", "repeating this method but with an edited input sequence if not recursive: frameshifts", "file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm,", "len(conseqs) < min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small:", "seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and duplicate: if meta_data is not None:", "Arguments: db_file (str): input tab-delimited database file. 
meta_data (str): Field to append to", "break elif scodons[i][0] == \".\": scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3] !=", "duplicate, imgt): \"\"\" Create intermediate sequence alignment and partition files for IgPhyML output", "column not found.\") found_no_funct = True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] +=", "changeo import __version__, __date__ # Imports import os import random import subprocess import", "nsi + si[i] if si[i] != \".\": ndotgaps.append(0) #find any gaps not divisible", "default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t) branch lengths (l) and parameters (r),", "True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] = 1", "GY94 trees and parameters format (str): input and output format. out_args (dict): arguments", "divisible by 3! len: %d, clone: %s , id: %s, seq: %s\" %(len(sequences[0]),\\", "# Start processing clones clonesizes = {} pass_count, nclones = 0, 0 printMessage(\"Processing", "0 if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]]", "a bunch of Ns qpos += 1 spos += 1 else: # if", "split by indels start_time = time() printMessage(\"Correcting frames and indels of sequences\", start_time=start_time,", "conseq if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False", "% (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None, \"fail\": None} if", "\"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f)", "regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"]", "Splits identical sequences with 
different meta_data. Returns: tuple: tuple of length four containing", "range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\") for j", "if ncdr3: ngerm = [] nimgt = [] for i in range(0, len(newseqs)):", "specified, do not attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List", "if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen", "r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis", "True, run IgPhyML on outputted data nproc (int) : Number of threads to", "= round(float(vals[i]),2) printLog(log) if clean != \"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\"", "sequence, modified starting position of IMGT sequence in input sequence). \"\"\" spos =", "clones (list): receptor objects within the same clone. sequences (list): sequences within the", "range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j]", "debug (bool) : print debugging statements. 
\"\"\" frameshifts = 0 for ins in", "upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked", "sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent", "clones, the length of the first sequence in clones, and the number of", "clustered into clones.\") if r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"] += 1", "min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" +", "failed processing. igphyml-pass parameter estimates and lineage trees from running IgPhyML, if specified", "!= \"-\" and seq2[i] != \".\": if seq1[i] != seq2[i]: dist += 1", "and all are zero # distance from the sequence they will be collapse", "spos < s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" +", "[60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\",", "(str): Field to append to sequence IDs. Splits identical sequences with different meta_data", "1: returns 1 masking succeeds \"\"\" if r.clone is None: printError(\"Cannot export datasets", "data Arguments: outfile (str): Output file name. igphymlout (str): igphyml output file nproc", "sequences and collapse keys = list(useqs.keys()) for k in keys: if useqs[k] in", "None: r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct", "or 2 nucleotides downstream of offending codon receptor.sequence_input = ros[0:(psite + 3)] +", "= False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount", "meta_data target_clones (str): List of clone IDs to analyze. collapse (bool): if True", "file. meta_data (str): Field to append to sequence IDs. 
Splits identical sequences with", "as mp from argparse import ArgumentParser from collections import OrderedDict from textwrap import", "print debugging statements? recursive (bool): was this function called recursively? \"\"\" frameshifts =", "+ \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in", "sites, 3): tally = 0 for j in range(0, nseqs): if sequences[j][i:(i +", "has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]]", "key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None, \"fail\": None} if pass_handle is not", "+ str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\" + str((germline))) for", "to append to sequence IDs. Splits identical sequences with different meta_data. clones (list)", "+ (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\", "for i in range(0, sites, 3): tally = 0 for j in range(0,", "pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None, \"fail\": None}", "partfile)) handle.close() output = {\"pass\": None, \"fail\": None} if pass_handle is not None:", "a sequence Arguments: sequence (str): IMGT gapped sequence in frame 1. Returns: int:", "\",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for i in", "action=\"store\", dest=\"append\", help=\"\"\"List of columns to append to sequence ID to ensure uniqueness.\"\"\")", "for j in range(0, nseqs): for i in range(0, sites, 3): if i", "regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13]", "Receptor objects. 
\"\"\" for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\")", "sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent parser parser_parent", "clones first and then predict germlines using --cloned\")) if sites > (len(germline)): seqdiff", "= pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] =", "database records failed processing. igphyml-pass parameter estimates and lineage trees from running IgPhyML,", "= kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] =", "Note: Collapse can give misleading dupcount information if some sequences have ambiguous characters", "ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too small, size is returned", "germlines are not the same among sequences in the same clone.\", \"Be sure", "= receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap = 0 si", "IMGT at that site and scan forward until you find a codon that", "they will be collapse to. if ncountj > ncounti: nci = 0 if", "(t) branch lengths (l) and parameters (r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\",", "fail_count = fails[\"rec_count\"] - pass_count # End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50)", "receptor.sequence_input = ros receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor, scodons, qcodons, spos,", "for i in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if scodons[i]", "processed records. lineages-fail database records failed processing. 
igphyml-pass parameter estimates and lineage trees", "outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep,", "= False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"]", "# duplicate sequences in clones with only 1 sequence? imgtar, germline, sites, nseqs", "(none, all) \"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout =", "regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30] *", "if oformat == \"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\")", "join with (least ambiguous chars) ambigchar = {} #sequence id -> number ATCG", "receptor (changeo.Receptor.Receptor): Receptor object. oqpos (int) : position of interest in input sequence.", "sites, 3): if i == 0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon", "HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output,", "IgPhyML on outputted data Arguments: outfile (str): Output file name. igphymlout (str): igphyml", "c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False)", "+ \",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for i", "as e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building in IgPhyML failed\")", "gaps are in IMGT sequence and remove them for now gaps = []", "(changeo.Receptor.Receptor): Receptor object. 
oqpos (int) : position of interest in input sequence. ospos", "+= 1 else: # if not the same, mask IMGT at that site", "\"NN\" if ncdr3: ngerm = [] nimgt = [] for i in range(0,", "{} clones = {} logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0,", "#alignment if scodons[i][2:3] != qi[2:3] or scodons[i + 1] != qi[3:6]: qi =", "keys = list(useqs.keys()) for k in keys: if useqs[k] in join: rfrom =", "for j in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if j < len(germline):", "if sequences[j][i:(i + 3)] != \"...\": tally += 1 tallies.append(tally) newseqs = []", "isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else:", "[] for i in range(0, sites, 3): tally = 0 for j in", "mask the #first codon in the IMGT seq, other times it will be", "in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\"", "\"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons split", "j in range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if i == 0:", "logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse,", "len(imgtar): correctseqs = True if correctseqs: maxlen = sites maximgt = len(imgtar) for", "sequence IDs. Splits identical sequences with different meta_data. 
clones (list) : list of", "key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] > 0: germ_id =", "= [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list))", "% ins, debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input =", "#we have to shift the frame. This attempts to correct for this by", "log[\"PASS\"] = True if debug: print(receptor.sequence_id) # adjust starting position of query sequence", "ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in", "+ str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not collapse and len(conseqs) <", "e.output, '<') printError(\"HLP tree building failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if", "= ros receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\" %", "%s at position %d\" % (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\"", "sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment", "\"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\",", "sequences writer object. 
Returns: 0: returns 0 if an error occurs or masking", "receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\" % ins, debug)", "\"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\",", "if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] =", "r.functional = True if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct =", "containing metadata to include in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\",", "target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\",", "1 spos = ospos printDebug(\"But codon was apparently preserved\", debug) if \"IN-FRAME\" in", "nseq = [] ncdr3 = 0 for j in range(0, len(imgt)): if imgt[j]", "useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] = useqs[kj]", "v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True)", "newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3: ngerm = [] nimgt = []", "if True collapse identical sequences. ncdr3 (bool): if True remove all CDR3s. nmask", "target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) -", "sequences are clustered into clones.\") if r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"]", "for each sequence out_dir (str): directory for output files. fail_writer (changeo.IO.TSVWriter): failed sequences", "of sites must be divisible by 3! 
len: %d, clone: %s , id:", "i in range(0, sites, 3): tally = 0 for j in range(0, nseqs):", "for output preferences. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. Returns: 0: returns 0", "OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note: Collapse can give misleading dupcount", "estimate: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\",", "gap detected! Refusing to include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING", "< len(qcodons) and spos < s_end and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" +", "\"TAR\", \"TGR\", \"TRR\") for i in range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs:", "(dict): log of information for each sequence debug (bool): print debugging statements? recursive", "reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns", "cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar = meta_data[0].split(\",\") for c in", "with different meta_data collapse (bool): if True collapse identical sequences. ncdr3 (bool): if", "of Ns qpos += 1 spos += 1 else: # if not the", "meta_data. Returns: list: deduplicated receptors within a clone. 
\"\"\" keys = list(useqs.keys()) join", "splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter,", "not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in", "= germline + \"N\" * (seqdiff) if sites % 3 != 0: printError(\"number", "\"DUPCOUNT\": cid = delim + \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid", "partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None,", "%s\" % (\"Predicted germlines are not the same among sequences in the same", "not \"\" and regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] +", "Splits identical sequences with different meta_data collapse (bool): if True collapse identical sequences.", "= ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else:", "= fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"]", "building in IgPhyML failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"]", "qi = \"NN\" + qi spos = i break elif scodons[i][0] == \".\":", "the query, at which point #we have to shift the frame. 
This attempts", "help=\"\"\"Omega parameters to estimate for FWR,CDR respectively: e = estimate, ce = estimate", "+ str(ncdr3)) useqs_f = OrderedDict() conseqs = [] for j in range(0, nseqs):", "= splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name is None: clone_dir = clone_name", "log, debug, recursive) if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if", "(scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos += 1 qpos += 1 while spos", "\",\" + str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at position %d,", "(sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and duplicate: if meta_data is not", "= not nmask) if total == sample_depth: break # Start processing clones clonesizes", "are not the same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False)", "cid = delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" %", "to sequence IDs. Splits identical sequences with different meta_data target_clones (str): List of", "debug) qcodons = [qi[i:i + 3] for i in range(0, len(qi), 3)] frameshifts", "Field to append to sequence IDs. Splits identical sequences with different meta_data. delim", "modified IMGT gapped sequence. log: dict of sequence information \"\"\" debug = False", "outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc),", "specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not", "seq2 (str): sequence 2 fbreak (bool): break after first difference found? 
Returns: int:", "starting position of IMGT sequence in input sequence s_end (int): end of IMGT", "k in keys: if useqs[k] in join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]]", "e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str,", "OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\"", "0 and curgap != 0: if curgap % 3 != 0 : printDebug(\"Frame-shifting", "out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs", "= clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen for j in range(0,len(sequences)): cimgt", "not None: meta_data_list = [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid =", "3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1]", "meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\",", "germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent parser parser_parent =", "= receptor.v_seq_start - 1 + oqpos*3 pisite = ospos * 3 if (psite", "small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output fasta file", "# Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser parser", "(str): omega optimization in IgPhyML (--omega) kappa (str): kappa optimization in IgPhyML (-t)", "clones): \"\"\" Remove CDR3 from all sequences and germline of a 
clone Arguments:", "\" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not collapse and len(conseqs)", "log, debug): \"\"\" Checks whether a frameshift occured in a sequence Arguments: receptor", "= 0 if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if nci < ncountj:", "False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs,", "sequence out_dir (str): directory for output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object.", "scodons[i+1] != qi[3:6]: qi = \"N\" + qi spos = i break else:", "sites = len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if", ": was this method part of a recursive call? mask (bool) : mask", "getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask split codons Arguments:", "= estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\",", "pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"]", "clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j", "output = {\"pass\": None, \"fail\": None} if pass_handle is not None: output[\"pass\"] =", "use with igphyml? Returns: str: modified IMGT gapped sequence. 
log: dict of sequence", "in range(0, len(qi), 3)] frameshifts = 0 s_end = 0 #adjust for the", "+ \":\" + ncon_seq) if i == 1: ncon_seq = ncon_seq + \".\"", "len(big_enough) == 0: printError(\"\\n\\nNo sequences found that match specified criteria.\",1) if sample_depth >", "in clones.keys(): if len(clones[str(k)]) < min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] =", "debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug) mout", "= delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"),", "split codons Arguments: scodons (list): list of codons in IMGT sequence. qi (str)", "hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser:", "sites > (len(germline)): seqdiff = sites - len(germline) germline = germline + \"N\"", "None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0,", "if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough)", "# if both are the same, move both forward spos += 1 qpos", "of sequences in clones. clones (list): list of Receptor objects. meta_data (str): Field", "False, do not attempt to mask split codons sample_depth (int): depth of subsampling", "= i break return qi, spos def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\"", "line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3])", "object. 
scodons (list): list of codons in IMGT sequence qcodons (list): list of", "% (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt)))", "newseqs[i] = nseq newgerm = ngerm imgt = nimgt #print(\"Length: \" + str(ncdr3))", "None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle,", "ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \"", "is not None: meta_data_list = [] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str):", "s_end = 0 #adjust for the fact that IMGT sequences can end on", "meta_data is not None: meta_data_ar = meta_data[0].split(\",\") for c in clones: if meta_data", "for seq_rec in newseqs[j]]) if meta_data is not None: meta_data_list = [] for", "\" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output fasta file of", "main for each input file for f in args.__dict__[\"db_files\"]: args_dict[\"db_file\"] = f buildTrees(**args_dict)", "in input sequence. ospos (int) : position of interest in IMGT sequence. log", "outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\",", "format) out_fields = getDbFields(db_file, reader=reader) # open input file handle = open(db_file, \"r\")", "back to IMGT sequence ncon_seq = \"\" counter = 0 for i in", "in Receptor list. receptors (dict): receptors within a clone (index is value in", "< len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] +", "output preferences. 
fail_writer (changeo.IO.TSVWriter): failed sequences writer object. Returns: 0: returns 0 if", "in the IMGT seq, other times it will be legitimately absent from the", "query, at which point #we have to shift the frame. This attempts to", "None: clone_dir = clone_name else: clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir)", "list of codons in IMGT sequence. qi (str) : input sequence. spos (int)", "frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE", "range(0, len(qi), 3)] frameshifts = 0 s_end = 0 #adjust for the fact", "ATCG nucleotides for i in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki = keys[i]", "group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to be subsampled (before deduplication).\"\"\")", "range(0, nseqs): for i in range(0, sites, 3): if i == 0: newseqs.append([])", "cloneseqs[r.clone] = [mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"failreads\"]", "0: returns 0 if an error occurs or masking fails. 1: returns 1", ": consensus sequences. duplicate (bool) : duplicate sequence if only one in a", "identical sequences Argument: useqs (dict): unique sequences within a clone. maps sequence to", "help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval", "[] # remove gap only sites from observed data newgerm = [] imgt", "ins in range(1, 3): ros = receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start", "to sequence IDs. Splits identical sequences with different meta_data. Returns: tuple: tuple of", "files. 
\"\"\" # Print parameter info log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"]", "keys = list(useqs.keys()) join = {} # id -> sequence id to join", "output def getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. \"\"\" #", "(int): minimum number of sequences per clone append (str): column name to append", "deduplication min_seq (int): minimum number of sequences per clone append (str): column name", "(n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\",", "if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr)", "ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\",", "files? 
none: leave all intermediate files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\",", "want to mask the #first codon in the IMGT seq, other times it", "scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end", "possible that SEQ-IMGT ends on a bunch of Ns qpos += 1 spos", "scodons[spos] == \"...\" and qcodons[qpos] != \"...\": #if IMGT gap, move forward in", "sequences found that match specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total =", "query sequence qi = qi[(receptor.v_seq_start - 1):] #tally where --- gaps are in", "in join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log", "!= 108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1", "0 for ins in range(1, 3): ros = receptor.sequence_input ris = receptor.sequence_imgt psite", "= estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML", "action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\",", "meta_data_list = [] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m])))", "action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of", "import default_out_args from presto.IO import printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults import", "str(len(imgtar))) #print(\"seqlen: \" + 
str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline) < len(sequences[i]):", "3) < len(ris): #cut out 1 or 2 nucleotides downstream of offending codon", "logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"]", "group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not attempt to mask split codons.\"\"\") group.add_argument(\"--md\",", "argument parsers. \"\"\" # Define input and output field help message fields =", "len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq)", "logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1", "DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log", "scodons[ospos] = \"NNN\" if spos >= s_end: printDebug(\"Masked %s at position %d, at", "i == 0: ncon_seq = ncon_seq + concatenated_seq[counter] counter += 1 ncon_seq =", "\"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in range(0, len(sequence), 3): if sequence[i:(i+3)]", "%(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse,", "with fewer than the specified number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\",", "contains log information for each sequence out_dir (str): directory for output files. 
fail_writer", "try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try: #get", "nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return", "else: receptor.sequence_input = ros receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor, scodons, qcodons,", "if out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"], \"w\") for j in logs.keys():", "between IMGT labels within a clone Arguments: sequences (list): list of sequences in", "== 0 and curgap != 0: if curgap % 3 != 0 :", "and curgap != 0: if curgap % 3 != 0 : printDebug(\"Frame-shifting gap", "% clones[0].clone) with open(outfile, \"w\") as clonef: if collapse: for seq_f, num in", "with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\"", "codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. scodons (list): list of codons in IMGT", "for k in clones.keys(): if len(clones[str(k)]) < min_seq: for j in range(0, len(clones[str(k)])):", "input and output format. out_args (dict): arguments for output preferences. Returns: dict: dictionary", "receptor.v_seq_start - 1 + oqpos*3 pisite = ospos * 3 if (psite +", "+ oqpos*3 pisite = ospos * 3 if (psite + 3 + ins)", "log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs (dict): unique sequences within", "(str) : modified germline of clonal lineage. conseqs (list) : consensus sequences. 
duplicate", "intermediate sequence alignment and partition files for IgPhyML output Arguments: clones (list): receptor", "asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted data Arguments: outfile (str): Output file", "clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and duplicate: if meta_data", "scodons[i][0] == \".\": scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or", "for i in range(0, sites, 3): if i == 0: newseqs.append([]) if tallies[i//3]", "= {} clones = {} logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0,", "output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\")", "same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in", "files: <folder> folder containing fasta and partition files for each clone. 
lineages successfully", "printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug)", "nimgtar = [] ngermline = [] ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar)))", "point we\"ll just want to mask the #first codon in the IMGT seq,", "(s_end, len(scodons), scodons[s_end]), debug) s_end += 1 qpos = 0 if mask: findAndMask(receptor,", "md.replace(\")\",\"-\") #remove parenthesis from metadata md = md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md)", "positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\"", "log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\",", "0 si = nsi scodons = [si[i:i + 3] for i in range(0,", "each sequence debug (bool): print debugging statements? recursive (bool): was this function called", "sites maximgt = len(imgtar) for j in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen", "(--hotness) oformat (str): output format for IgPhyML (tab or txt) nohlp (bool): If", "of threads to parallelize IgPhyML across optimization (str): Optimize combination of topology (t)", "hotness log[\"NPROC\"] = nproc printLog(log) if not nohlp: try: #estimate HLP parameters/trees p", "hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log =", "log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction", "metadata md = md.replace(\")\",\"-\") #remove parenthesis from metadata md = md.replace(\"(\",\"-\") #remove parenthesis", "if append is not None: if append is not None: for m in", "total = 0 for r in big_enough: if r.functional is None: r.functional =", "append is not None: for m in append: r.sequence_id = 
r.sequence_id + \"_\"", "and regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] +", "\"...\" and qcodons[qpos] != \"...\": #if IMGT gap, move forward in imgt spos", "+ 1] != qi[3:6]: qi = \"NN\" + qi spos = i break", "= False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return", "+ ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\")", "= [] for i in range(0, len(newseqs)): nseq = [] ncdr3 = 0", "= collapse printLog(log) # Open output files out_label = \"lineages\" pass_handle = getOutputHandle(db_file,", "getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter,", "action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega", "identical sequences with different meta_data. delim (str): delimited to use when appending meta_data.", "= out_args[\"out_name\"] if dir_name is None: clone_dir = clone_name else: clone_dir = os.path.join(dir_name,", "in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec", "def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1,", "include. Returns: int: number of clones. 
\"\"\" s = \"\" delim = \"_\"", "= clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if", "useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim) if collapse and len(useqs_f) < min_seq:", "m != \"DUPCOUNT\": matches += 1 m_match = (matches == len(meta_data)) if dist", "nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing metadata to include in output", "db_file (str): input tab-delimited database file. meta_data (str): Field to append to sequence", "\"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\")))", "not nohlp: try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e:", "printError(\"Cannot export datasets until sequences are clustered into clones.\") if r.dupcount is None:", "regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) +", "the IMGT seq, other times it will be legitimately absent from the query,", "newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in range(0, sites, 3): if tallies[i//3] >", "\"\"\" Determines whether a PTC exits in a sequence Arguments: sequence (str): IMGT", "(str): delete intermediate files? 
(none, all) \"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)])", "= 0 for i in gaps: #print(str(i) + \":\" + ncon_seq) if i", "for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec)", "0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k in clones.keys(): if len(clones[str(k)]) < min_seq:", "group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to append to sequence ID to", "\\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"])", "= \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] +=", "for i in ndotgaps: if i == 1: curgap += 1 elif i", "IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab or txt) clean (str):", "ends on a bunch of Ns qpos += 1 spos += 1 else:", "action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? none: leave all intermediate", "log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"])", "= {} # id -> useq to join with (least ambiguous chars) ambigchar", "#remove parenthesis from metadata r.setField(meta_data[m],md) if append is not None: if append is", "Returns: int: negative if not PTCs, position of PTC if found. \"\"\" ptcs", "cloneseqs (list): list of masked clone sequences. 
logs (dict): contains log information for", "\\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"])", "range(spos, len(scodons)): if scodons[i] != \"...\" and len(scodons[i]) == 3 and scodons[i] !=", "log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log = OrderedDict() log[\"OUTPUT\"]", "run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\")", "else: # if not the same, mask IMGT at that site and scan", "log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log) if not nohlp:", "regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels", "getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. \"\"\" # Define input", "clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j in range(0, len(sequences)):", "sequence they will be collapse to. if ncountj > ncounti: nci = 0", "in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
oqpos (int) : position of", "= outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\",", "files out_label = \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out", "= \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due to a frame-shift by repeating", "r.clone is None: printError(\"Cannot export datasets until sequences are clustered into clones.\") if", "log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons)", "pass_count, nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k in clones.keys():", "masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate,", "in range(0, nseqs): cid = \"\" if meta_data is not None: meta_data_list =", "clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone)", "logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return", "igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e =", "all CDR3s. 
nmask (bool): if False, do not attempt to mask split codons", "pass_count # End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None if", "concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq", "logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output fasta file of masked, concatenated sequences", "IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? none:", "open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" %", "dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\",", "choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t) branch lengths (l) and parameters (r), or", "if r.functional and ptcs < 0: #If IMGT regions are provided, record their", "--oformat txt will delete all tree file results.\\n\" \"You'll have to do that", "if sample_depth > 0: random.shuffle(big_enough) total = 0 for r in big_enough: if", "details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1,", "difference found? Returns: int: number of ACGT differences. \"\"\" if len(seq1) != len(seq2):", "input sequence, modified starting position of IMGT sequence in input sequence). 
\"\"\" spos", "if not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos >=", "= clone_name else: clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format", "for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True):", "site and scan forward until you find a codon that matches next site", "j in range(i+1,len(keys)): ki = keys[i] kj = keys[j] if meta_data is None:", "Define input and output field help message fields = dedent( \"\"\" output files:", "to IMGT sequence ncon_seq = \"\" counter = 0 for i in gaps:", "rj.getField(m) and m != \"DUPCOUNT\": matches += 1 m_match = (matches == len(meta_data))", "r in all_records: if target_clones is None or r.clone in target_clones: if init_clone_sizes[r.clone]", "os.makedirs(clone_dir) # Format options try: reader, writer, __ = getFormatOperators(format) except ValueError: printError(\"Invalid", "meta_data. clones (list) : list of receptor objects. collapse (bool) : deduplicate sequences.", "the sequence they will be collapse to. 
if ncountj > ncounti: nci =", "fields=out_fields) cloneseqs = {} clones = {} logs = OrderedDict() fails = {\"rec_count\":0,", "getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs", "+ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"])", "if nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else: ncj =", "clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id", "i in ndotgaps: if i == 1: curgap += 1 elif i ==", "1 if qcodons[qpos-1] == scodons[ospos]: #if codon in previous position is equal to", "= \"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif", "else: init_clone_sizes[r.clone] = 1 for r in all_records: if target_clones is None or", "data newgerm = [] imgt = [] for j in range(0, nseqs): for", "spos = 0 for i in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]),", "% (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False,", "add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical", "len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"]", "in previous position is equal to original codon, it was preserved qpos -=", "call? 
mask (bool) : mask split codons for use with igphyml? Returns: str:", "\"\"\" Calculate the distance between two sequences counting only A,T,C,Gs Arguments: seq1 (str):", "str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential mismatches between IMGT labels within", "else: curgap = 0 si = nsi scodons = [si[i:i + 3] for", "ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti", "data sequences. Any clones with fewer than the specified number of sequences will", "splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name is None: clone_dir = clone_name else:", "type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e = estimate, ce =", "parameters to estimate: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\",", "on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to parallelize IgPhyML", "s_end += 1 qpos = 0 if mask: findAndMask(receptor, scodons, qcodons, spos, s_end,", "if len(clones[str(k)]) < min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too", "min_seq=min_seq) #If clone is too small, size is returned as a negative if", "help=\"\"\"List of fields to containing metadata to include in output fasta file sequence", "% c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if there", "ncountj # this algorithm depends on the fact that all sequences are compared", "type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP", "= newgerm[-1] + \"N\" elif len(lcodon) == 1: newgerm[-1] = newgerm[-1] + 
\"NN\"", "is just a gap spos += 1 while qpos < len(qcodons) and spos", "= clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in range(0, len(sequences)): i = sequences[seqi]", "outputted data Arguments: outfile (str): Output file name. igphymlout (str): igphyml output file", "of IMGT sequence qpos (int): starting position of input sequence in IMGT sequence", "= outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is", "depends on the fact that all sequences are compared pairwise, and all are", "clones (list): list of Receptor objects. \"\"\" for i in range(0,len(sequences)): imgtar =", "IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of", "mask_seq = mout[0] ptcs = hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence suddenly", "spos >= s_end: printDebug(\"Masked %s at position %d, at end of subject sequence\"", "not attempt to mask split codons sample_depth (int): depth of subsampling before deduplication", "= clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\")", "not found :-/\") try: #get GY94 starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError", "from Bio.Seq import Seq from functools import partial # Presto and changeo imports", "#sequence id -> number ATCG nucleotides for i in range(0,len(keys)-1): for j in", "gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\",", "\"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} #", "labels within a clone Arguments: sequences 
(list): list of sequences in clones. clones", "all) nohlp (bool): If True, only estimate GY94 trees and parameters format (str):", "exits in a sequence Arguments: sequence (str): IMGT gapped sequence in frame 1.", "= 0 for r in big_enough: if r.functional is None: r.functional = True", "qi[2:3] or scodons[i + 1] != qi[3:6]: qi = \"NN\" + qi spos", "of sequence information \"\"\" debug = False qi = receptor.sequence_input si = receptor.sequence_imgt", "out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"],", "+= 1 while spos < s_end and scodons[spos] == \"...\": #possible next codon", "sequence. debug (bool) : print debugging statements. \"\"\" frameshifts = 0 for ins", "getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None:", "receptor.sequence_imgt log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap = 0 si =", "ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log", "for i in gaps: #print(str(i) + \":\" + ncon_seq) if i == 1:", "Define argument parser parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence", "it will be legitimately absent from the query, at which point #we have", "mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone", "arguments for output preferences. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. 
Returns: 0: returns", "germline: resolveglines = True if resolveglines: printError(\"%s %s\" % (\"Predicted germlines are not", "while spos < s_end and qpos < len(qcodons): if debug: print(scodons[spos] + \"\\t\"", "Mask codons split by indels start_time = time() printMessage(\"Correcting frames and indels of", "forward in imgt spos += 1 elif scodons[spos] == qcodons[qpos]: # if both", "returns 0 if an error occurs or masking fails. 1: returns 1 masking", "clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq)", "sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2:", "line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1])", "default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e = estimate, ce = estimate + confidence", "= 0 for m in meta_data: if ri.getField(m) == rj.getField(m) and m !=", "PTC exits in a sequence Arguments: sequence (str): IMGT gapped sequence in frame", "ambiguous chars) joinseqs = {} # id -> useq to join with (least", "newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate sequence alignment and partition files for", "= writer(fail_handle, fields=out_fields) cloneseqs = {} clones = {} logs = OrderedDict() fails", "regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] +", "qpos (int): starting position of input sequence in IMGT sequence log (dict): log", "sequence. spos (int) : starting position of IMGT sequence in input sequence. debug", "ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki", "= True # duplicate sequences in clones with only 1 sequence? 
imgtar, germline,", "that matches next site if debug: print(\"checking %s at position %d %d\" %", "3: if scodons[-1] == \"..\" or scodons[-1] == \".\": scodons[-1] = \"...\" else:", "scodons[i][2:3] != qi[2:3] or scodons[i + 1] != qi[3:6]: qi = \"NN\" +", "(clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None,", "== 1: curgap += 1 elif i == 0 and curgap != 0:", "fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"]", "+ confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\")", "= \"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) #", "#subsampling loop init_clone_sizes = {} big_enough = [] all_records = [] found_no_funct =", "def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance between two sequences counting only", "if meta_data is not None: seq, cid = seq_f.split(delim) cid = delim +", "will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads to be", "\" + rto.sequence_id del useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines whether a", "while spos < s_end and scodons[spos] == \"...\": #possible next codon is just", "first difference found? Returns: int: number of ACGT differences. 
\"\"\" if len(seq1) !=", "for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile =", "def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks codons split", "spos (int): starting position of IMGT sequence in input sequence s_end (int): end", "1 spos += 1 else: # if not the same, mask IMGT at", "(int): number of sequences. delim (str) : delimiter for extracting metadata from ID.", "+ regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt):", "spos < s_end and scodons[spos] == \"...\": #possible next codon is just a", "codon is just a gap spos += 1 while qpos < len(qcodons) and", "sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\")", "germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False", "0: printError(\"number of sites must be divisible by 3! 
len: %d, clone: %s", "printError(\"HLP tree building failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat ==", "for r in big_enough: if r.functional is None: r.functional = True if found_no_funct", "threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete", "ValueError: printError(\"Invalid format %s.\" % format) out_fields = getDbFields(db_file, reader=reader) # open input", "useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return", "False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not", "+= 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"]", "OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open", "move forward in imgt spos += 1 elif scodons[spos] == qcodons[qpos]: # if", "collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\",", "ID r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove", "to append to sequence_id igphyml (bool): If True, run IgPhyML on outputted data", "omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log) if not", "codons sample_depth (int): depth of subsampling before deduplication min_seq (int): minimum number of", "(str): input tab-delimited database file. 
meta_data (str): Field to append to sequence IDs.", "all tree file results.\\n\" \"You'll have to do that yourself.\") log = OrderedDict()", "regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) +", "= os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] > 0: germ_id = [\"GERM\"] if", "%s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones,", "if there are differences, e.g. if reconstruction was done before clonal clustering resolveglines", "i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] = nseq newgerm", "not None: for m in append: r.sequence_id = r.sequence_id + \"_\" + r.getField(m)", "print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\" + str((germline)))", "= qi[(receptor.v_seq_start - 1):] #tally where --- gaps are in IMGT sequence and", "input files \"\"\" # Info __author__ = \"<NAME>\" from changeo import __version__, __date__", "time from Bio.Seq import Seq from functools import partial # Presto and changeo", "None: if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\"", "IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab or txt) nohlp (bool):", "elif qcodons[qpos] == \"N\": # possible that SEQ-IMGT ends on a bunch of", "on a bunch of Ns qpos += 1 spos += 1 else: #", "seq = seq_f cid = \"\" if meta_data is not None: seq, cid", "simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 else: #imgt_warn = \"\\n!", "newgerm = [] imgt = [] for j in range(0, nseqs): for i", "outputted data nproc (int) : Number of threads to parallelize IgPhyML across optimization", "with (least ambiguous chars) ambigchar = {} #sequence id -> 
number ATCG nucleotides", "return 0 # Run IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1,", "\"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as clonef: if collapse: for seq_f, num", "germline + \"N\" * (seqdiff) if sites % 3 != 0: printError(\"number of", "e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building in IgPhyML failed\") log", "ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") +", "mout[0] ptcs = hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence suddenly has a", "kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr >= 0:", "igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log) if", "info log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse", "1. Returns: int: negative if not PTCs, position of PTC if found. 
\"\"\"", "reconstruction interval (0-1).\"\"\") return parser if __name__ == \"__main__\": \"\"\" Parses command line", "r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] +", "\",\" + str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] =", "and indels of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes = {} big_enough =", "the specified number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1,", "+= 1 elif scodons[spos] == qcodons[qpos]: # if both are the same, move", "= len(all_records) - len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences found that match", "= fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log)", "rto.dupcount += rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] =", "skj, True) m_match = True if meta_data is not None: matches = 0", "\"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt = ris return frameshifts", "!= \".\": ndotgaps.append(0) #find any gaps not divisible by three curgap = 0", "log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked", "join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else: ncj = 0 if useqs[kj] in", "for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int,", "False for seqi in range(0, len(sequences)): i = sequences[seqi] if len(i) != sites", "help=\"\"\"Depth of reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", 
nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List", "ki # loop through list of joined sequences and collapse keys = list(useqs.keys())", "if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with", "legitimately absent from the query, at which point #we have to shift the", "of IMGT sequence in input sequence). \"\"\" spos = 0 for i in", "qpos += 1 if qcodons[qpos-1] == scodons[ospos]: #if codon in previous position is", "estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e =", "ndotgaps: if i == 1: curgap += 1 elif i == 0 and", "containing fasta and partition files for each clone. lineages successfully processed records. lineages-fail", "dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters", "None if out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"], \"w\") for j in", "collapse (bool): if True collapse identical sequences. ncdr3 (bool): if True remove all", "collapse: for seq_f, num in useqs_f.items(): seq = seq_f cid = \"\" if", "len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\")", "1 qpos += 1 elif qcodons[qpos] == \"N\": # possible that SEQ-IMGT ends", "Number of threads to parallelize IgPhyML across optimization (str): Optimize combination of topology", "different meta_data. 
Returns: tuple: tuple of length four containing a list of IMGT", "= ris frameshifts += 1 printDebug(\"FRAMESHIFT of length %d!\" % ins, debug) log[\"FAIL\"]", "2: newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon) == 1: newgerm[-1] = newgerm[-1]", "start_time) log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not None else", "\"\"\" # Print parameter info log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] =", "handle = open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer = None, None if", "deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs (dict): unique", "log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\"", "> maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen for j", "motif in IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab or txt)", "False return -len(useqs_f) elif not collapse and len(conseqs) < min_seq: for j in", "return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks", "(tab or txt) clean (str): delete intermediate files? (none, all) nohlp (bool): If", "output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. min_seq (int): minimum number of", "--- gaps back to IMGT sequence ncon_seq = \"\" counter = 0 for", "to append to sequence IDs. Splits identical sequences with different meta_data. 
delim (str):", "meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate sequence", "simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq", "= 0 s_end = 0 #adjust for the fact that IMGT sequences can", "\"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\"", "in clones, the germline sequence of the first receptor in clones, the length", "out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\",", "list of receptor objects. collapse (bool) : deduplicate sequences. nseqs (int): number of", "alignment to IMGT reference, then produces input files for IgPhyML Arguments: db_file (str):", "matches next site if debug: print(\"checking %s at position %d %d\" % (scodons[spos],", "forward spos += 1 qpos += 1 elif qcodons[qpos] == \"N\": # possible", "fails[\"germlineptc\"] += 1 return 0 if r.functional and ptcs < 0: #If IMGT", "meta_data. delim (str): delimited to use when appending meta_data. Returns: list: deduplicated receptors", "= fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on outputted data?", "+ regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt)", "log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"]", "input sequence). 
\"\"\" spos = 0 for i in range(0, len(scodons)): printDebug(\"%s %s\"", "spos = i break elif scodons[i][0] == \".\": scodons[i] = \"N\" + scodons[i][1:3]", "of offending codon receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite + 3 +", "None: printError(\"Cannot export datasets until sequences are clustered into clones.\") if r.dupcount is", "(outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None, \"fail\": None} if pass_handle", "fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too small, size is returned as a", "in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] =", "init_clone_sizes = {} big_enough = [] all_records = [] found_no_funct = False for", "!= \"DUPCOUNT\": matches += 1 m_match = (matches == len(meta_data)) if dist ==", "information for each sequence. debug (bool) : print debugging statements. \"\"\" frameshifts =", "+ str((germline))) for j in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if j", "to a frame-shift by repeating this method but with an edited input sequence", "numbering of clonal positions . \"\"\" # bootstrap these data if desired lg", "printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1]", "is too small, size is returned as a negative if clonesizes[str(k)] > 0:", "number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of", "maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks codons split by", "clustering resolveglines = False for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline", "= OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep) for", "fasta and partition files for each clone. 
lineages successfully processed records. lineages-fail database", "offending codon receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite + 3 + ins):]", "was due to a frame-shift by repeating this method but with an edited", "in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki = keys[i] kj = keys[j] if", "using --cloned\")) if sites > (len(germline)): seqdiff = sites - len(germline) germline =", "sequence IDs. Splits identical sequences with different meta_data. Returns: tuple: tuple of length", "(bool) : mask split codons for use with igphyml? Returns: str: modified IMGT", "sequences. ncdr3 (bool): if True remove CDR3 logs (dict): contains log information for", "analyze. collapse (bool): if True collapse identical sequences. ncdr3 (bool): if True remove", "IMGT reference, then produces input files for IgPhyML Arguments: db_file (str): input tab-delimited", "0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in range(0, sites, 3): if tallies[i//3]", "= \"\\n! IMGT FWR/CDR sequence columns not detected.\\n! Cannot run CDR/FWR partitioned model", "\"\"\" if len(seq1) != len(seq2): printError(\"Sequences are not the same length! %s %s\"", "collapse identical sequences. 
ncdr3 (bool): if True remove CDR3 logs (dict): contains log", "= \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons))", "dest=\"nmask\", help=\"\"\"If specified, do not attempt to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\",", "ncounti ambigchar[useqs[kj]] = ncountj # this algorithm depends on the fact that all", "igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\",", "min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" +", "something\"s up. log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due to a", "fields = dedent( \"\"\" output files: <folder> folder containing fasta and partition files", "regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt =", "logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1", "+ qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1] == scodons[ospos]: #if codon in", "starting position of input sequence in IMGT sequence log (dict): log of information", "log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for", "and scodons[spos] == \"...\": #possible next codon is just a gap spos +=", "codons in input sequence spos (int): starting position of IMGT sequence in input", "spos, qpos)) ospos=spos oqpos=qpos spos += 1 qpos += 1 while spos <", "IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\",", "[] for i in range(0, 
len(newseqs)): nseq = [] ncdr3 = 0 for", "ids. meta_data (str): Field to append to sequence IDs. Splits identical sequences with", "an edited input sequence if not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log,", "for seq_f, num in useqs_f.items(): seq = seq_f cid = \"\" if meta_data", "partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def outputIgPhyML(clones, sequences,", "runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output", "== \"DUPCOUNT\": cid = delim + \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" +", "from functools import partial # Presto and changeo imports from presto.Defaults import default_out_args", "except: printError(\"igphyml not found :-/\") try: #get GY94 starting topologies p = subprocess.check_output(gy_args)", "into clones first and then predict germlines using --cloned\")) if sites > (len(germline)):", "= deduplicate(useqs_f, clones, logs, meta_data, delim) if collapse and len(useqs_f) < min_seq: for", "remove CDR3 logs (dict): contains log information for each sequence out_dir (str): directory", "return concatenated_seq, log def unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance between two", "in IMGT sequence log (dict): log of information for each sequence debug (bool):", "= simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 else: #imgt_warn =", "omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted", "kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj # this algorithm 
depends", "import OrderedDict from textwrap import dedent from time import time from Bio.Seq import", "same clone.\", \"Be sure to cluster sequences into clones first and then predict", "counting only A,T,C,Gs Arguments: seq1 (str): sequence 1 seq2 (str): sequence 2 fbreak", "site if debug: print(\"checking %s at position %d %d\" % (scodons[spos], spos, qpos))", "!= scodons[spos]: scodons[ospos] = \"NNN\" if spos >= s_end: printDebug(\"Masked %s at position", "group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences", "r.sequence_id + \"_\" + r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args,", "identical sequences with different meta_data. Returns: tuple: tuple of length four containing a", "in range(0, sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon)", "scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]: qi = \"N\" +", "+ \",\" + str(spos) else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at position", "fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"]", "reference, then produces input files for IgPhyML Arguments: db_file (str): input tab-delimited database", "matches += 1 m_match = (matches == len(meta_data)) if dist == 0 and", "in clones: if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st", "fails, out_args, fail_writer, mask=True): \"\"\" Masks codons split by alignment to IMGT reference", "information for each sequence. fails (dict): counts of various sequence processing failures. 
out_args", "None: r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs =", "dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads", "{} # id -> useq to join with (least ambiguous chars) ambigchar =", "nsi = nsi + si[i] if si[i] != \".\": ndotgaps.append(0) #find any gaps", "Returns: list: deduplicated receptors within a clone. \"\"\" keys = list(useqs.keys()) join =", "clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run", "not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3: if scodons[-1] == \"..\"", "+= r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone]", "p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP", "Arguments: scodons (list): list of codons in IMGT sequence. qi (str) : input", "len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True):", "receptors within a clone (index is value in useqs dict). log (collections.OrderedDict): log", "log[names[i]] = round(float(vals[i]),2) printLog(log) if clean != \"none\": log = OrderedDict() log[\"START\"] =", "(bool): if True collapse identical sequences. 
ncdr3 (bool): if True remove all CDR3s.", "!= \"...\": if scodons[i][0:2] == \"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT", "== 2: newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon) == 1: newgerm[-1] =", "forward until you find a codon that matches next site if debug: print(\"checking", "partfile = os.path.join(clone_dir, \"%s.part.txt\" % key) if clonesizes[key] > 0: germ_id = [\"GERM\"]", "resolveglines = False for c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is", "\"-\"))) else: for j in range(0, nseqs): cid = \"\" if meta_data is", "newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount +=", "{} big_enough = [] all_records = [] found_no_funct = False for r in", "if both are the same, move both forward spos += 1 qpos +=", "nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\" Create intermediate sequence alignment and partition", "partial # Presto and changeo imports from presto.Defaults import default_out_args from presto.IO import", "logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j conseqs.append(conseq) if", "os.remove(lsplit[3]) todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog)", "(l) and parameters (r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str,", "igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def", "to sequence IDs. Splits identical sequences with different meta_data. 
meta_data (str): Field to", "str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat,", "log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse: log[\"DUPLICATE\"] =", "2 fbreak (bool): break after first difference found? Returns: int: number of ACGT", "if scodons[i][0:2] == \"..\": scodons[i] = \"NN\" + scodons[i][2] #sometimes IMGT will just", "Start processing clones clonesizes = {} pass_count, nclones = 0, 0 printMessage(\"Processing clones\",", "open(outrep) for line in todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit =", "\" + str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n", "= len(imgtar) for j in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen = len(sequences[j])", "r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence", "dest=\"target_clones\", help=\"\"\"List of clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int,", "len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id", "!= qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos += 1", "append to sequence IDs. 
Splits identical sequences with different meta_data target_clones (str): List", "ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove", "not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader, writer, __ = getFormatOperators(format) except", "is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline != germline: resolveglines = True if", "except ValueError: printError(\"Invalid format %s.\" % format) out_fields = getDbFields(db_file, reader=reader) # open", "and regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] +", "IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\", action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to parallelize", "+= r.dupcount if mout[1][\"FAIL\"] == \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] ==", "ris return frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False):", "width=50) #subsampling loop init_clone_sizes = {} big_enough = [] all_records = [] found_no_funct", "duplicate (bool) : duplicate sequence if only one in a clone. imgt (list)", "output file nproc (int): Number of threads to parallelize IgPhyML across optimization (str):", "to sequence_id igphyml (bool): If True, run IgPhyML on outputted data nproc (int)", "Field to append to sequence IDs. Splits identical sequences with different meta_data. 
clones", "analysis\" printLog(log) # Note: Collapse can give misleading dupcount information if some sequences", "for m in meta_data: if ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\": matches", "= c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not the same", "0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log)", "%s %s\" % (seq1, seq2)) dist = 0 for i in range(0,len(seq1)): if", "+ qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos] != \"...\": #if IMGT gap,", "conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and duplicate: if meta_data is not None:", "in clones. clones (list): list of Receptor objects. meta_data (str): Field to append", "not None: if append is not None: for m in append: r.sequence_id =", "amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is not", "IMGT sequence in input sequence s_end (int): end of IMGT sequence qpos (int):", "elif scodons[i][0] == \".\": scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3]", "1 while spos < s_end and scodons[spos] == \"...\": #possible next codon is", "in meta_data: if ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\": matches += 1", "printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]:", "sequence IDs. 
Splits identical sequences with different meta_data collapse (bool): if True collapse", "with the fact that it's possible to start mid-codon qi,spos = correctMidCodonStart(scodons, qi,", "criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total = 0 for r in big_enough:", "data nproc (int) : Number of threads to parallelize IgPhyML across optimization (str):", "receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match = True if meta_data is not", "gaps.append(0) nsi = nsi + si[i] if si[i] != \".\": ndotgaps.append(0) #find any", "germline of a clone Arguments: sequences (list): list of sequences in clones. clones", "range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs: return i return -1 def rmCDR3(sequences,", "out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {} clones = {} logs", "useqs_f (dict): unique sequences mapped to ids. meta_data (str): Field to append to", "str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir, \"%s.part.txt\" % key)", "and partition files for IgPhyML output Arguments: clones (list): receptor objects within the", "textwrap import dedent from time import time from Bio.Seq import Seq from functools", "each sequence. debug (bool) : print debugging statements. \"\"\" frameshifts = 0 for", "and len(useqs_f) < min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone too", "for a particular sequence. clones (list): list of receptors. cloneseqs (list): list of", "-> useq to join with (least ambiguous chars) ambigchar = {} #sequence id", "Identify junction region by IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
recursive (bool)", "to mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing", "last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar = meta_data[0].split(\",\")", "len(imgtar) sites = maxlen for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff =", "at position %d\" % (scodons[ospos], ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\" in", "\"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return 0 if r.functional and", "parameters format (str): input and output format. out_args (dict): arguments for output preferences.", "= True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone] =", "is not \"\" and regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"]", "sequences in the same clone.\", \"Be sure to cluster sequences into clones first", "= \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in", "= os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as clonef: if collapse: for", "processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before exporting to", "logs, meta_data, delim) if collapse and len(useqs_f) < min_seq: for seq_f, num in", "1 return 0 if r.functional and ptcs < 0: #If IMGT regions are", "= [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega,", "+ \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"]", "next codon over in the #alignment if 
scodons[i][2:3] != qi[2:3] or scodons[i +", "outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\", optimization, \"--omega\", omega, \"-t\", kappa,", "elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"]", "specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start", "per clone append (str): column name to append to sequence_id igphyml (bool): If", "(clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file", "if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT assignments are not the same within clone %d!\\n\"", "3 != 0 : printDebug(\"Frame-shifting gap detected! Refusing to include sequence.\", debug) log[\"PASS\"]", "len(scodons), scodons[s_end]), debug) s_end += 1 qpos = 0 if mask: findAndMask(receptor, scodons,", "outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as clonef: if collapse:", "fail_writer = None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"])", "1 for r in all_records: if target_clones is None or r.clone in target_clones:", "out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name,", "point #we have to shift the frame. This attempts to correct for this", "sequence in input sequence. debug (bool) : print debugging statements. Returns: tuple: (modified", "0: germ_id = [\"GERM\"] if meta_data is not None: for i in range(1,", "data sequences to include. Returns: int: number of clones. 
\"\"\" s = \"\"", "!= \"...\": #if IMGT gap, move forward in imgt spos += 1 elif", "printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from", "type=str, default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to", ">= 0: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] =", "len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq)", "- len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] + \"N\"*(seqdiff) last = cimgt[-1]", "IgPhyML (tab or txt) nohlp (bool): If True, only estimate GY94 trees and", "optimization in IgPhyML (--omega) kappa (str): kappa optimization in IgPhyML (-t) motifs (str):", "(see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\") igphyml_group.add_argument(\"--nproc\",", "if ncountj > ncounti: nci = 0 if useqs[ki] in join: nci =", "if len(useqs_f) == 1 and duplicate: if meta_data is not None: if meta_data[0]", "0 s_end = 0 #adjust for the fact that IMGT sequences can end", "debug) s_end += 1 qpos = 0 if mask: findAndMask(receptor, scodons, qcodons, spos,", "None: if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid = clones[num].sequence_id.translate(transtable)", "\"\"\" Find and mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. scodons (list):", "recursive) if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) !=", "out_args, fail_writer, mask=True): \"\"\" Masks codons split by alignment to IMGT reference Arguments:", "0: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input", "for each sequence. 
debug (bool) : print debugging statements. \"\"\" frameshifts = 0", "== \"FRAME-SHIFTING DELETION\": fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"]", "found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True all_records.append(r) if r.clone", "regions[\"fwr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"]", "= \"Germline PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return 0 if r.functional", "sample_depth > 0: random.shuffle(big_enough) total = 0 for r in big_enough: if r.functional", "CDR3s. nmask (bool): if False, do not attempt to mask split codons sample_depth", "receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite + 3):] # Debug sequence modifications", "is not None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] =", "deduplicate sequences. nseqs (int): number of sequences. delim (str) : delimiter for extracting", "duplicate = True # duplicate sequences in clones with only 1 sequence? imgtar,", "IDs. Splits identical sequences with different meta_data. 
clones (list) : list of receptor", "len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)]", "\"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1", "fail_handle.close() if log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log =", "= parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before", "at which point we\"ll just want to mask the #first codon in the", "are zero # distance from the sequence they will be collapse to. if", "deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to append to sequence ID", "+ cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data is", "particular sequence. clones (list): list of receptors. cloneseqs (list): list of masked clone", "append is not None: if append is not None: for m in append:", "parenthesis from metadata r.setField(meta_data[m],md) if append is not None: if append is not", "if collapse: for seq_f, num in useqs_f.items(): seq = seq_f cid = \"\"", "(dict): arguments for output preferences. 
Returns: dict: dictionary of output pass and fail", "= md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\")\",\"-\") #remove parenthesis from metadata", "-= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count # End clone", "\"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR respectively: e", "done before clonal clustering resolveglines = False for c in clones: ngermline =", "of Receptor objects. \"\"\" for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline =", "delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs (dict): unique sequences within a clone.", "\", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone) with open(outfile, \"w\") as clonef:", "with open(outfile, \"w\") as clonef: if collapse: for seq_f, num in useqs_f.items(): seq", "remove gap only sites from observed data newgerm = [] imgt = []", "-1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all sequences and germline of", "dedent from time import time from Bio.Seq import Seq from functools import partial", "receptors within a clone. \"\"\" keys = list(useqs.keys()) join = {} # id", "printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults import default_format from changeo.IO import splitName,", "appending meta_data. Returns: list: deduplicated receptors within a clone. \"\"\" keys = list(useqs.keys())", "= 0 #adjust for the fact that IMGT sequences can end on gaps", "= False for seqi in range(0, len(sequences)): i = sequences[seqi] if len(i) !=", "outfile (str): Output file name. 
igphymlout (str): igphyml output file nproc (int): Number", "= motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc printLog(log) if not nohlp: try:", "1 else: fails[\"other_fail\"] += 1 else: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"]", "receptor.sequence_imgt, log else: curgap = 0 si = nsi scodons = [si[i:i +", "import subprocess import multiprocessing as mp from argparse import ArgumentParser from collections import", "cid = seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) +", "that it's possible to start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons =", "log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position %d\" % (scodons[ospos],", "is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log = OrderedDict() log[\"OUTPUT\"] =", "found.\") found_no_funct = True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else:", "sequence. ospos (int) : position of interest in IMGT sequence. log (dict) :", "CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, do not attempt to", "= \"BuildTrees\" printLog(log) #Run IgPhyML on outputted data? 
if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir,", "out_label = \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out =", "= maxlen for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen -", "fewer than the specified number of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\",", "+ str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output fasta file of masked,", "cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj,", "getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def", "else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify", "into IgPhyML input files \"\"\" # Info __author__ = \"<NAME>\" from changeo import", "\"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar =", "presto.Defaults import default_out_args from presto.IO import printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults", "osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\"", "subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output, '<') printError(\"HLP tree building failed\") log", "sequence of the first receptor in clones, the length of the first sequence", "= clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = [] ngermline =", "first letter if non-match, at which point we\"ll 
just want to mask the", "column not found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence", "matches = 0 for m in meta_data: if ri.getField(m) == rj.getField(m) and m", "= receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"] = False", "log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"]", "too small, size is returned as a negative if clonesizes[str(k)] > 0: nclones", "#print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict() conseqs = [] for j in", "%d, but couldn't find upstream match\" % (scodons[ospos], ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif", "modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins,", "ospos=spos oqpos=qpos spos += 1 qpos += 1 while spos < s_end and", "(bool) : duplicate sequence if only one in a clone. 
imgt (list) :", "clonesizes[key] > 0: germ_id = [\"GERM\"] if meta_data is not None: for i", "ncon_seq = ncon_seq + concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq + concatenated_seq[counter:]", "regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] +", "len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline =", "join = {} # id -> sequence id to join with (least ambiguous", "[] nimgtar = [] ngermline = [] ncdr3 = 0 #print(\"imgtarlen: \" +", "type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e = estimate, ce = estimate +", "and germline of a clone Arguments: sequences (list): list of sequences in clones.", "if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list)", "that SEQ-IMGT ends on a bunch of Ns qpos += 1 spos +=", "same within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j))", "optimization (str): Optimize combination of topology (t) branch lengths (l) and parameters (r)", "= \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt = ris return", "imgt = nimgt #print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict() conseqs = []", "collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt) if collapse: return len(useqs_f) else: return", "position is equal to original codon, it was preserved qpos -= 1 spos", "motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons 
split by", "out_args (dict): arguments for output preferences. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. Returns:", "= log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\")", "each sequence. fails (dict): counts of various sequence processing failures. out_args (dict): arguments", "if len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar =", "but with an edited input sequence if not recursive: frameshifts += checkFrameShifts(receptor, oqpos,", "action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str,", "sequence qpos (int): starting position of input sequence in IMGT sequence log (dict):", "imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"]", "produces input files for IgPhyML Arguments: db_file (str): input tab-delimited database file. meta_data", "analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs", "< len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] +", "1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences,", "parameters clean (str): delete intermediate files? 
(none, all) \"\"\" osplit = outfile.split(\".\") outrep", "for c in clones: if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in", "was done before clonal clustering resolveglines = False for c in clones: ngermline", "the same among sequences in the same clone.\", \"Be sure to cluster sequences", "width=50) for k in clones.keys(): if len(clones[str(k)]) < min_seq: for j in range(0,", "str((germline))) for j in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if j <", "= regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"]", "GY94 starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>',", "!= \"-\" and seq1[i] != \".\": if seq2[i] != \"N\" and seq2[i] !=", "ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline !=", "nucleotides for i in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki = keys[i] kj", "alignment and partition files for IgPhyML output Arguments: clones (list): receptor objects within", "receptors (dict): receptors within a clone (index is value in useqs dict). log", "same length! 
%s %s\" % (seq1, seq2)) dist = 0 for i in", "mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads +=", "fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count # End clone processing printMessage(\"Done\",", "needs to be corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] != imgtar[j]: printError(\"IMGT", "default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\")", "dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e = estimate, ce = estimate", "range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not", "IgPhyML. omega (str): omega optimization in IgPhyML (--omega) kappa (str): kappa optimization in", "IMGT will just cut off first letter if non-match, at which point we\"ll", "germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close() output = {\"pass\": None, \"fail\":", "None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"]", "if not PTCs, position of PTC if found. \"\"\" ptcs = (\"TAA\", \"TGA\",", "debugging statements. 
\"\"\" frameshifts = 0 for ins in range(1, 3): ros =", "this algorithm depends on the fact that all sequences are compared pairwise, and", "#remove colons from metadata md = md.replace(\",\",\"-\") #remove commas from metadata md =", "line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) == 4: os.remove(lsplit[0]) os.remove(lsplit[1]) os.remove(lsplit[3]) todelete.close() os.remove(outrep)", "* len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) +", "= False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]:", "(index is value in useqs dict). log (collections.OrderedDict): log of sequence errors. meta_data", "across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str, default=\"none\", help=\"\"\"Delete intermediate files? none: leave", "\"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. \"\"\" # Define input and", "Arguments: sequences (list): list of sequences in clones. clones (list): list of Receptor", "Arguments: outfile (str): Output file name. 
igphymlout (str): igphyml output file nproc (int):", "frameshifts if len(scodons[-1]) != 3: if scodons[-1] == \"..\" or scodons[-1] == \".\":", "return parser if __name__ == \"__main__\": \"\"\" Parses command line arguments and calls", "absent from the query, at which point #we have to shift the frame.", "0 for i in range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if", "minimum number of sequences per clone append (str): column name to append to", "processing clones clonesizes = {} pass_count, nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time,", "[] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f =", "True # duplicate sequences in clones with only 1 sequence? imgtar, germline, sites,", "elif not collapse and len(conseqs) < min_seq: for j in range(0, nseqs): logs[clones[j].sequence_id][\"FAIL\"]", "open(outfile, \"w\") as clonef: if collapse: for seq_f, num in useqs_f.items(): seq =", "in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are", "= len(imgtar) sites = maxlen for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff", "{} # id -> sequence id to join with (least ambiguous chars) joinseqs", "= os.path.basename(pass_handle.name) if pass_handle is not None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"]", "m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for", "conseq_f = 
\"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if conseq_f", "that IMGT sequences can end on gaps for i in range(spos, len(scodons)): if", "method part of a recursive call? mask (bool) : mask split codons for", "meta_data_ar = meta_data[0].split(\",\") for c in clones: if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0]))", "log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"]", "found. \"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for", "False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region by", "# Imports import os import random import subprocess import multiprocessing as mp from", "# Mask codons split by indels start_time = time() printMessage(\"Correcting frames and indels", "fails[\"region_fail\"] += 1 return 0 else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence columns", "checkArgs(parser) args = parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for", "(dict): contains log information for each sequence out_dir (str): directory for output files.", "= len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline", "range(0,len(keys)-1): for j in range(i+1,len(keys)): ki = keys[i] kj = keys[j] if meta_data", "# open input file handle = open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer", "edited input sequence if not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug)", "newgerm (str) : modified germline of clonal lineage. 
conseqs (list) : consensus sequences.", "MATCH\", debug) log[\"PASS\"] = False #if no match for the adjacent codon was", "HLP model?\"\"\") igphyml_group.add_argument(\"--asr\", action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return", "[] ngermline = [] ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \"", "# Output fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse,", "clone: %s , id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites,", "found_no_funct = True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone] += 1 else: init_clone_sizes[r.clone]", "{\"pass\": None, \"fail\": None} if pass_handle is not None: output[\"pass\"] = pass_handle.name pass_handle.close()", "(seqdiff) if sites % 3 != 0: printError(\"number of sites must be divisible", "[] imgt = [] for j in range(0, nseqs): for i in range(0,", "recursive (bool) : was this method part of a recursive call? mask (bool)", "nci = 0 if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if nci <", "before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all", "qcodons (list): list of codons in input sequence spos (int): starting position of", "estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\",", "conseqs, duplicate, imgt): \"\"\" Create intermediate sequence alignment and partition files for IgPhyML", "False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if there are differences,", "Returns: str: modified IMGT gapped sequence. 
log: dict of sequence information \"\"\" debug", "s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end += 1 qpos", "1 or 2 nucleotides downstream of offending codon receptor.sequence_input = ros[0:(psite + 3)]", "of output pass and fail files. \"\"\" # Print parameter info log =", "not None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count", "or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\",", "seqi in range(0, len(sequences)): i = sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\"))", "0 for j in range(0, nseqs): if sequences[j][i:(i + 3)] != \"...\": tally", "= \"\" for i in range(0, sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)])", "kj = keys[j] if meta_data is None: ski = keys[i] skj = keys[j]", "__author__ = \"<NAME>\" from changeo import __version__, __date__ # Imports import os import", "regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is not None:", "log of sequence errors. meta_data (str): Field to append to sequence IDs. 
Splits", "j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff =", "qpos < len(qcodons): if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos] ==", "False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed", "= receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is not None:", "scodons[-1] == \"..\" or scodons[-1] == \".\": scodons[-1] = \"...\" else: scodons[-1] =", "if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m])", "from metadata md = md.replace(\")\",\"-\") #remove parenthesis from metadata md = md.replace(\"(\",\"-\") #remove", "seq1 (str): sequence 1 seq2 (str): sequence 2 fbreak (bool): break after first", "to include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] =", "intermediate files? 
(none, all) \"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\"", "ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki # loop through list of joined", "\"NN\" + scodons[i][2] #sometimes IMGT will just cut off first letter if non-match,", "= receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug: print(receptor.sequence_id)", "0 for i in range(0,len(seq1)): if seq1[i] != \"N\" and seq1[i] != \"-\"", ":-/\") try: #get GY94 starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e:", "other times it will be legitimately absent from the query, at which point", "list of codons in IMGT sequence qcodons (list): list of codons in input", "dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser if __name__ ==", "the same, move both forward spos += 1 qpos += 1 elif qcodons[qpos]", "r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis", "clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return 1 else:", "= mout[0] ptcs = hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence suddenly has", "ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]]", "clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = [] ngermline = []", "\"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due to a frame-shift by repeating this", "receptor objects within the same clone. 
sequences (list): sequences within the same clone", "False return -len(conseqs) # Output fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f,", "= \"_\" duplicate = True # duplicate sequences in clones with only 1", "r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log", "not None: matches = 0 for m in meta_data: if ri.getField(m) == rj.getField(m)", "or r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"]", "= [r] cloneseqs[r.clone] = [mask_seq] return 1 else: if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] +=", "\"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"] =", "ospos), debug) log[\"PASS\"]=False log[\"FAIL\"]=\"FAILED_MATCH:\"+str(spos) elif qcodons[qpos] == scodons[spos]: printDebug(\"Masked %s at position %d\"", "i in range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi =", "== 0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i", "or txt) nohlp (bool): If True, only estimate GY94 trees and parameters clean", "= newgerm[-1] + \"NN\" if ncdr3: ngerm = [] nimgt = [] for", "c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not the same within", "not None: meta_data_list = [] for m in range(0,len(meta_data)): if isinstance(clones[j].getField(meta_data[m]), str): clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\",", "1 + oqpos*3 pisite = ospos * 3 if (psite + 3 +", "value in useqs dict). log (collections.OrderedDict): log of sequence errors. 
meta_data (str): Field", "for i in range(0, len(si), 3)] # deal with the fact that it's", "duplicate: if meta_data is not None: if meta_data[0] == \"DUPCOUNT\": cid = delim", "j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"] = \"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"]", "to sequence IDs. Splits identical sequences with different meta_data. delim (str): delimited to", "too small: \" + str(len(conseqs)) logs[clones[j].sequence_id][\"PASS\"] = False return -len(conseqs) # Output fasta", "scodons = [si[i:i + 3] for i in range(0, len(si), 3)] # deal", "= meta_data[0].split(\",\") for c in clones: if meta_data is not None: c.setField(meta_data[0],c.getField(meta_data_ar[0])) for", "sequence in input sequence s_end (int): end of IMGT sequence qpos (int): starting", "<gh_stars>1-10 #!/usr/bin/env python3 \"\"\" Converts TSV files into IgPhyML input files \"\"\" #", "frame 1. Returns: int: negative if not PTCs, position of PTC if found.", "cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if", "True, only estimate GY94 trees and parameters format (str): input and output format.", "seq_f, num in useqs_f.items(): seq = seq_f cid = \"\" if meta_data is", "+ regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt)))", "r in big_enough: if r.functional is None: r.functional = True if found_no_funct is", "print(scodons[spos] + \"\\t\" + qcodons[qpos]) if scodons[spos] == \"...\" and qcodons[qpos] != \"...\":", "IDs. Splits identical sequences with different meta_data. 
Returns: tuple: tuple of length four", "%d %d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos += 1 qpos +=", "asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML GY94 tree", "= ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\",", "= \"NNN\" if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos)", "objects. collapse (bool) : deduplicate sequences. nseqs (int): number of sequences. delim (str)", "return qi, spos def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks whether a", "= 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \" +", "pairwise, and all are zero # distance from the sequence they will be", "imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq =", "!= sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if correctseqs: maxlen =", "igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"),", "spos = i break else: spos = i break return qi, spos def", "sequence. fails (dict): counts of various sequence processing failures. 
out_args (dict): arguments for", "# print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j])", "tuple of length four containing a list of IMGT positions for first sequence", "of subsampling before deduplication min_seq (int): minimum number of sequences per clone append", "\"--threads\", str(nproc),\"--outname\",gyout] hlp_args = [\"igphyml\",\"--repfile\", outrep, \"-m\", \"HLP\", \"--run_id\", \"hlp\", \"--threads\", str(nproc), \"-o\",", "False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] = r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r)", "= \"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)])", "group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\",", "imgtpartlabels = [13]*len(regions[\"fwr1_imgt\"]) + [30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) +", "= fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"] if collapse:", "nsi = \"\" for i in range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1)", "a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] =", "qi (str) : input sequence. 
spos (int) : starting position of IMGT sequence", "gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi + si[i] if si[i] != \".\":", "functools import partial # Presto and changeo imports from presto.Defaults import default_out_args from", "def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument: useqs (dict):", "1: ncon_seq = ncon_seq + \".\" elif i == 0: ncon_seq = ncon_seq", "= c.getField(\"germline_imgt\") if ngermline != germline: resolveglines = True if resolveglines: printError(\"%s %s\"", "join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki # loop through list of joined sequences", "all sequences and germline of a clone Arguments: sequences (list): list of sequences", "os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name, __ = splitName(db_file) else: clone_name =", "then predict germlines using --cloned\")) if sites > (len(germline)): seqdiff = sites -", "object for a particular sequence. clones (list): list of receptors. 
cloneseqs (list): list", "len(ros) and (pisite + 3) < len(ris): #cut out 1 or 2 nucleotides", "if meta_data is not None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" %", "== \"N\": # possible that SEQ-IMGT ends on a bunch of Ns qpos", "3): tally = 0 for j in range(0, nseqs): if sequences[j][i:(i + 3)]", "#tally where --- gaps are in IMGT sequence and remove them for now", "information \"\"\" debug = False qi = receptor.sequence_input si = receptor.sequence_imgt log =", "- pass_count # End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None", "codon that matches next site if debug: print(\"checking %s at position %d %d\"", "+ regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if", "r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt)", "mismatches between IMGT labels within a clone Arguments: sequences (list): list of sequences", "\"N\": # possible that SEQ-IMGT ends on a bunch of Ns qpos +=", "IDs. Splits identical sequences with different meta_data collapse (bool): if True collapse identical", "maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen for j in", "(list) : consensus sequences. 
duplicate (bool) : duplicate sequence if only one in", "0: printError(\"\\n\\nNo sequences found that match specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough)", "\"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i", "len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) + \\ [60]", "= nseq newgerm = ngerm imgt = nimgt #print(\"Length: \" + str(ncdr3)) useqs_f", "sites from observed data newgerm = [] imgt = [] for j in", "fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim,", "= [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0]", "tuple: (modified input sequence, modified starting position of IMGT sequence in input sequence).", "id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir,", "clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1 and", "os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open output files out_label = \"lineages\" pass_handle", "no match for the adjacent codon was found, something\"s up. log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos)", "action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing metadata to include in output fasta", "spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False #if no match for", "files for IgPhyML Arguments: db_file (str): input tab-delimited database file. 
meta_data (str): Field", "log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on outputted", "ris = receptor.sequence_imgt psite = receptor.v_seq_start - 1 + oqpos*3 pisite = ospos", "m in meta_data: if ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\": matches +=", "igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e = estimate, ce", "Optimize combination of topology (t) branch lengths (l) and parameters (r) in IgPhyML.", "regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels", "[] for j in range(0, nseqs): for i in range(0, sites, 3): if", "not None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id),", "sequence in clones, the germline sequence of the first receptor in clones, the", "debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug) mout = maskSplitCodons(receptor, recursive=True) if", "= \"IgPhyML analysis\" printLog(log) # Note: Collapse can give misleading dupcount information if", "[] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list)) sid", "def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False): \"\"\" Find and", "in range(1, 3): ros = receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start -", "= (matches == len(meta_data)) if dist == 0 and m_match: ncounti = ki.count(\"A\")", "gaps back to IMGT sequence ncon_seq = \"\" counter = 0 for i", "a clone. maps sequence to index in Receptor list. 
receptors (dict): receptors within", "i in range(0, len(qi), 3)] frameshifts = 0 s_end = 0 #adjust for", "imports from presto.Defaults import default_out_args from presto.IO import printLog, printMessage, printWarning, printError, printDebug", "where --- gaps are in IMGT sequence and remove them for now gaps", "!= \"...\" and len(scodons[i]) == 3 and scodons[i] != \"NNN\": s_end = i", "m in append: r.sequence_id = r.sequence_id + \"_\" + r.getField(m) total += maskCodonsLoop(r,", "curgap += 1 elif i == 0 and curgap != 0: if curgap", "printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None if out_args[\"log_file\"] is not None: log_handle", "failed sequences writer object. Returns: 0: returns 0 if an error occurs or", "out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {} clones = {}", "to containing metadata to include in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\",", "from all sequences and germline of a clone Arguments: sequences (list): list of", "nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50) for k in clones.keys(): if", "qcodons, spos, s_end, qpos, log, debug, recursive) if not log[\"PASS\"] and not recursive:", "sequence log (dict): log of information for each sequence debug (bool): print debugging", "i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i in range(0, len(newgerm)): clonef.write(\"%s\"", "seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data,", "+ concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] =", "igphyml (bool): If True, 
run IgPhyML on outputted data nproc (int) : Number", "correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask split codons Arguments: scodons (list): list", "oqpos (int) : position of interest in input sequence. ospos (int) : position", "estimation\" printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\")", "def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks whether a frameshift occured in", "# id -> sequence id to join with (least ambiguous chars) joinseqs =", "None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k", "+ str((sequences[i])) ) # print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)): if imgtar[j]", "in range(i+1,len(keys)): ki = keys[i] kj = keys[j] if meta_data is None: ski", "of masked clone sequences. logs (dict): contains log information for each sequence. fails", "ambigchar = {} #sequence id -> number ATCG nucleotides for i in range(0,len(keys)-1):", "error occurs or masking fails. 1: returns 1 masking succeeds \"\"\" if r.clone", "0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] = nseq newgerm = ngerm", "sequences mapped to ids. meta_data (str): Field to append to sequence IDs. Splits", "for i in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki = keys[i] kj =", "Arguments: receptor (changeo.Receptor.Receptor): Receptor object. scodons (list): list of codons in IMGT sequence", "recursive (bool): was this function called recursively? \"\"\" frameshifts = 0 while spos", "list of sequences in clones. clones (list): list of Receptor objects. 
meta_data (str):", "None: for m in append: r.sequence_id = r.sequence_id + \"_\" + r.getField(m) total", "if len(big_enough) == 0: printError(\"\\n\\nNo sequences found that match specified criteria.\",1) if sample_depth", "useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt) if collapse: return", "on the fact that all sequences are compared pairwise, and all are zero", "len(newgerm)): clonef.write(\"%s\" % newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir, \"%s.part.txt\" %", "== 0: printError(\"\\n\\nNo sequences found that match specified criteria.\",1) if sample_depth > 0:", "codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing metadata to include", "3! len: %d, clone: %s , id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return", "= receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match = True if meta_data is", "number of data sequences. Any clones with fewer than the specified number of", "collapse keys = list(useqs.keys()) for k in keys: if useqs[k] in join: rfrom", "estimate GY94 trees and parameters format (str): input and output format. 
out_args (dict):", "if useqs[k] in join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount", "if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1 return 0 # Run", "ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length:", "codon in previous position is equal to original codon, it was preserved qpos", "in append: r.sequence_id = r.sequence_id + \"_\" + r.getField(m) total += maskCodonsLoop(r, clones,", "divisible by three curgap = 0 for i in ndotgaps: if i ==", "== \"...\" and qcodons[qpos] != \"...\": #if IMGT gap, move forward in imgt", "in range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from metadata md", "object. oqpos (int) : position of interest in input sequence. ospos (int) :", "+= 1 return 0 else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence columns not", "collapse and len(useqs_f) < min_seq: for seq_f, num in useqs_f.items(): logs[clones[num].sequence_id][\"FAIL\"] = \"Clone", "split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing metadata to", "sequence). \"\"\" spos = 0 for i in range(0, len(scodons)): printDebug(\"%s %s\" %", "qi, debug): \"\"\" Find and mask split codons Arguments: scodons (list): list of", "from metadata r.setField(meta_data[m],md) if append is not None: if append is not None:", "intermediate files? 
none: leave all intermediate files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\",", "[30]*len(regions[\"cdr1_imgt\"]) + [45]*len(regions[\"fwr2_imgt\"]) + \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) +", "logs, fails, out_args, fail_writer, mask = not nmask) if total == sample_depth: break", "files into IgPhyML input files \"\"\" # Info __author__ = \"<NAME>\" from changeo", "estimate for FWR,CDR respectively: e = estimate, ce = estimate + confidence interval\"\"\")", "parameter). meta_data (str): Field to append to sequence IDs. Splits identical sequences with", "epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If", "(changeo.Receptor.Receptor): receptor object for a particular sequence. clones (list): list of receptors. cloneseqs", "recursive call? mask (bool) : mask split codons for use with igphyml? Returns:", "#remove commas from metadata md = md.replace(\":\",\"-\") #remove colons from metadata md =", "until you find a codon that matches next site if debug: print(\"checking %s", "sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs ==", "__ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is None: __, clone_name, __ = splitName(db_file) else:", "is not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" % (clones[0].clone,\"_\".join(germ_id))) for i", "sequences within the same clone (share indexes with clones parameter). 
meta_data (str): Field", "r.sequence_id = r.sequence_id.replace(\":\",\"-\") #remove colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas", "from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id =", "sequence errors. meta_data (str): Field to append to sequence IDs. Splits identical sequences", "debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug) mout = maskSplitCodons(receptor,", "in input sequence spos (int): starting position of IMGT sequence in input sequence", "+ scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]: qi = \"N\"", "todelete = open(outrep) for line in todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\")", "if asr >= 0: hlp_args.append(\"--ASRc\") hlp_args.append(str(asr)) log = OrderedDict() log[\"START\"] = \"IgPhyML GY94", "clones with only 1 sequence? imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data)", "= -len(cloneseqs[str(k)]) else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir,", "possible to start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i +", "IMGT sequence. qi (str) : input sequence. spos (int) : starting position of", "positions . \"\"\" # bootstrap these data if desired lg = len(newgerm) sites", "gaps for i in range(spos, len(scodons)): if scodons[i] != \"...\" and len(scodons[i]) ==", "list of sequences in clones. clones (list): list of Receptor objects. 
\"\"\" for", "(\"Predicted germlines are not the same among sequences in the same clone.\", \"Be", "data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq", "j_call, clone_id, v_sequence_start \"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) #", "= r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] =", "+ regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"]", "\\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30]", "0: ncon_seq = ncon_seq + concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq +", "datasets until sequences are clustered into clones.\") if r.dupcount is None: r.dupcount =", "clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites = maxlen for j in range(0,len(sequences)): cimgt =", "3): if i == 0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon =", "+ ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\")", "0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \" + str(len(germline)))", "+= 1 if fbreak: break return dist def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"):", "Remove CDR3 from all sequences and germline of a clone Arguments: sequences (list):", "oformat (str): output format for IgPhyML (tab or txt) clean (str): delete intermediate", "of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f]", "3 if (psite + 3 + ins) < len(ros) and (pisite + 3)", 
"\"IgPhyML GY94 tree estimation\" printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml", "collapse (bool): if True collapse identical sequences. ncdr3 (bool): if True remove CDR3", "sequences. Any clones with fewer than the specified number of sequences will be", "delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\")))", "\"tab\": igf = open(igphyml_out) names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in", "identical sequences. ncdr3 (bool): if True remove all CDR3s. nmask (bool): if False,", "qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if spos >= s_end: printDebug(\"Masked %s at", "if non-match, at which point we\"ll just want to mask the #first codon", "= OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) # Note: Collapse can give misleading", "imgt = [] for j in range(0, nseqs): for i in range(0, sites,", "gptcs >= 0: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"]", "= 0 si = nsi scodons = [si[i:i + 3] for i in", "one in a clone. 
imgt (list) : IMGT numbering of clonal positions .", "end=True, width=50) log_handle = None if out_args[\"log_file\"] is not None: log_handle = open(out_args[\"log_file\"],", "if found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True all_records.append(r) if", "nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] =", "1 else: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] =", "3 + ins) < len(ros) and (pisite + 3) < len(ris): #cut out", "del args_dict[\"db_files\"] # Call main for each input file for f in args.__dict__[\"db_files\"]:", "if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i] = nseq", "if ptcs >= 0: printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\" % r.sequence_id)", "\"\"\" # Parse command line arguments parser = getArgParser() checkArgs(parser) args = parser.parse_args()", "log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"] = True if debug: print(receptor.sequence_id) # adjust", "conseqs (list) : consensus sequences. duplicate (bool) : duplicate sequence if only one", "statements. 
\"\"\" frameshifts = 0 for ins in range(1, 3): ros = receptor.sequence_input", "len(newgerm) sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir,", "= clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs", "len(lcodon) == 2: newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon) == 1: newgerm[-1]", "clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else: for j in range(0, nseqs): cid =", "%d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str,", "# Info __author__ = \"<NAME>\" from changeo import __version__, __date__ # Imports import", "and fail files. \"\"\" # Print parameter info log = OrderedDict() log[\"START\"] =", "cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and duplicate: if", "qpos -= 1 spos = ospos printDebug(\"But codon was apparently preserved\", debug) if", "for j in range(0, nseqs): if sequences[j][i:(i + 3)] != \"...\": tally +=", "sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\")", "changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi,", "of clones. 
\"\"\" s = \"\" delim = \"_\" duplicate = True #", "printLog(log) todelete = open(outrep) for line in todelete: line = line.rstrip(\"\\n\") line =", "108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1 newseqs[i]", "#cut out 1 or 2 nucleotides downstream of offending codon receptor.sequence_input = ros[0:(psite", "sequences in clones. \"\"\" sites = len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\")", "ptcs: return i return -1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all", "(str): directory for output files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. min_seq (int):", "spos, s_end, qpos, log, debug, recursive) if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"]", "spos = i break return qi, spos def checkFrameShifts(receptor, oqpos, ospos, log, debug):", "just a gap spos += 1 while qpos < len(qcodons) and spos <", "ospos), debug) if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos)", "sequence suddenly has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] =", "\"Be sure to cluster sequences into clones first and then predict germlines using", "in IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab or txt) nohlp", "a frameshift occured in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
oqpos (int)", "codons in IMGT sequence qcodons (list): list of codons in input sequence spos", "%d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if", "line in todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if", "target_clones is None or r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq: big_enough.append(r) fails[\"totalreads\"]", "clone_id, v_sequence_start \"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define", "nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else: ncj = 0", "(str): output format for IgPhyML (tab or txt) clean (str): delete intermediate files?", "+ 3] for i in range(0, len(si), 3)] # deal with the fact", "to estimate: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\",", "\"\"\" Converts TSV files into IgPhyML input files \"\"\" # Info __author__ =", "start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i + 3] for", "input sequence in IMGT sequence log (dict): log of information for each sequence", "+ \"N\" elif len(lcodon) == 1: newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3:", "0.05, start_time) log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not None", "modified germline of clonal lineage. conseqs (list) : consensus sequences. 
duplicate (bool) :", "log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"]", "return imgtar, germline, sites, nseqs def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim,", "== 0 and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\")", "clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j]) imgtdiff = len(imgtar)-len(cimgt) sequences[j] = sequences[j] +", "subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try: #get GY94 starting topologies p =", "for each sequence. fails (dict): counts of various sequence processing failures. out_args (dict):", "range(0, len(sequences)): i = sequences[seqi] if len(i) != sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar):", "= str(spos) concatenated_seq = Seq(\"\") for i in scodons: concatenated_seq += i #", "= r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data is not None): for m", "(dict): unique sequences mapped to ids. 
meta_data (str): Field to append to sequence", "in range(0, len(sequences)): printError(\"%s\\n%s\\n\" % (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\") for", "and mask split codons Arguments: scodons (list): list of codons in IMGT sequence.", "if dir_name is None: clone_dir = clone_name else: clone_dir = os.path.join(dir_name, clone_name) if", "in all_records: if target_clones is None or r.clone in target_clones: if init_clone_sizes[r.clone] >=", "clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr) return output def getArgParser():", "md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\":\",\"-\")", "#output partition file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\") as", "log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open output files out_label =", "on gaps for i in range(spos, len(scodons)): if scodons[i] != \"...\" and len(scodons[i])", "trees and parameters clean (str): delete intermediate files? (none, all) \"\"\" osplit =", "= outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else:", "+ kj.count(\"T\") + kj.count(\"G\") + kj.count(\"C\") ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj #", "ambigchar[join[useqs[ki]]] if nci < ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else: ncj", "__ = getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\" % format) out_fields = getDbFields(db_file,", "(dict): receptors within a clone (index is value in useqs dict). 
log (collections.OrderedDict):", "log[\"SEQ_IN\"] = r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"]", "clone_dir = clone_name else: clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) #", "correct for this by looking at the next codon over in the #alignment", "and the number of sequences in clones. \"\"\" sites = len(sequences[0]) nseqs =", "igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args):", "mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence", "the germline sequence of the first receptor in clones, the length of the", "meta_data: if ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\": matches += 1 m_match", "# End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None if out_args[\"log_file\"]", "log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with", "if gptcs >= 0: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone", "the frame. 
This attempts to correct for this by looking at the next", "\"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons split by indels start_time = time() printMessage(\"Correcting", "ncdr3 (bool): if True remove CDR3 logs (dict): contains log information for each", "range(1, 3): ros = receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start - 1", "ros = receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start - 1 + oqpos*3", "fail_writer, mask=True): \"\"\" Masks codons split by alignment to IMGT reference Arguments: r", "to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to", "3)] + ris[(pisite + 3):] # Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug)", "record their positions rd = RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is", "for IgPhyML (tab or txt) nohlp (bool): If True, only estimate GY94 trees", "for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list)) sid =", "#print(\"germline: \" + str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\" + str(clones[i].sequence_id)) #", "log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"]", "sequence reconstruction interval (0-1).\"\"\") return parser if __name__ == \"__main__\": \"\"\" Parses command", "(bool): if True remove all CDR3s. 
nmask (bool): if False, do not attempt", "+= 1 qpos += 1 while spos < s_end and scodons[spos] == \"...\":", "ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") + kj.count(\"G\") +", "SEQ-IMGT ends on a bunch of Ns qpos += 1 spos += 1", "seq2[i] != \"N\" and seq2[i] != \"-\" and seq2[i] != \".\": if seq1[i]", "(r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices =", "hotness=\"e,e,e,e,e,e\", oformat=\"tab\", nohlp=False, asr=-1, clean=\"none\"): \"\"\" Run IgPhyML on outputted data Arguments: outfile", "ncon_seq + \".\" elif i == 0: ncon_seq = ncon_seq + concatenated_seq[counter] counter", "log (dict) : log of information for each sequence. debug (bool) : print", "+= 1 m_match = (matches == len(meta_data)) if dist == 0 and m_match:", "IDs. Splits identical sequences with different meta_data target_clones (str): List of clone IDs", "debug) if scodons[i] != \"...\": if scodons[i][0:2] == \"..\": scodons[i] = \"NN\" +", "qi = \"N\" + qi spos = i break else: spos = i", "log of information for each sequence debug (bool): print debugging statements? recursive (bool):", "clone. 
sequences (list): sequences within the same clone (share indexes with clones parameter).", "delim = \"_\" duplicate = True # duplicate sequences in clones with only", "of IMGT positions for first sequence in clones, the germline sequence of the", "sites = maxlen for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen", "(int): depth of subsampling before deduplication min_seq (int): minimum number of sequences per", "fbreak: break return dist def deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical", "debug) log[\"PASS\"] = False #if no match for the adjacent codon was found,", "newgerm[i].replace(\".\",\"-\")) clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile,", "+ \\ [60]*len(regions[\"cdr2_imgt\"]) + [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\ [120] *", "= [] nsi = \"\" for i in range(0,len(si)): if si[i] == \"-\":", "of fields to containing metadata to include in output fasta file sequence headers.\"\"\")", "input sequence. ospos (int) : position of interest in IMGT sequence. 
log (dict)", "= \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j])", "useq to join with (least ambiguous chars) ambigchar = {} #sequence id ->", "if collapse: return len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails,", "if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"] =", "import multiprocessing as mp from argparse import ArgumentParser from collections import OrderedDict from", "at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False,", "next codon is just a gap spos += 1 while qpos < len(qcodons)", "% (sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\") for j in range(0,len(imgtar)): if", "in IMGT sequence and remove them for now gaps = [] ndotgaps =", "(l) and parameters (r) in IgPhyML. omega (str): omega optimization in IgPhyML (--omega)", "respectively: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\",", "failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\": igf =", "log of information for each sequence. debug (bool) : print debugging statements. \"\"\"", "\"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in clones:", "spos += 1 else: # if not the same, mask IMGT at that", "detected.\\n! Cannot run CDR/FWR partitioned model on this data.\\n\" imgtpartlabels = [0] *", "= sites maximgt = len(imgtar) for j in range(0,len(sequences)): if len(sequences[j]) > maxlen:", "the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. 
\"\"\" # Define input and output field", "1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] +=", "clones. \"\"\" s = \"\" delim = \"_\" duplicate = True # duplicate", "= nimgt #print(\"Length: \" + str(ncdr3)) useqs_f = OrderedDict() conseqs = [] for", "file handle = open(db_file, \"r\") records = reader(handle) fail_handle, fail_writer = None, None", "unAmbigDist(seq1, seq2, fbreak=False): \"\"\" Calculate the distance between two sequences counting only A,T,C,Gs", "> ncounti: nci = 0 if useqs[ki] in join: nci = ambigchar[join[useqs[ki]]] if", "\"Collapsed with \" + rto.sequence_id del useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines", "0 # Run IgPhyML on outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\",", "PTC\" fails[\"seq_fail\"] += 1 fails[\"germlineptc\"] += 1 return 0 if r.functional and ptcs", "a frame-shift by repeating this method but with an edited input sequence if", "mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else:", "ncon_seq = ncon_seq + \".\" elif i == 0: ncon_seq = ncon_seq +", "if sites > (len(germline)): seqdiff = sites - len(germline) germline = germline +", "c in clones: ngermline = c.getField(\"germline_imgt_d_mask\") if ngermline is \"\": ngermline = c.getField(\"germline_imgt\")", "then produces input files for IgPhyML Arguments: db_file (str): input tab-delimited database file.", "\"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\", \"TRR\") for i in range(0, len(sequence), 3): if", "= clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = [] ngermline = [] ncdr3 =", "unAmbigDist(ski, skj, True) m_match = True if meta_data is not None: matches =", "nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] +", "ncon_seq = \"\" counter 
= 0 for i in gaps: #print(str(i) + \":\"", "print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if", "todelete.close() os.remove(outrep) os.remove(outfile) os.remove(gyout) cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if", "clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and", "clones, and the number of sequences in clones. \"\"\" sites = len(sequences[0]) nseqs", "if out_args[\"out_name\"] is None: __, clone_name, __ = splitName(db_file) else: clone_name = out_args[\"out_name\"]", "None: output[\"pass\"] = pass_handle.name pass_handle.close() if fail_handle is not None: output[\"fail\"] = fail_handle.name", "by indels start_time = time() printMessage(\"Correcting frames and indels of sequences\", start_time=start_time, width=50)", "of information for each sequence. debug (bool) : print debugging statements. \"\"\" frameshifts", "os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all with --oformat txt", "str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not collapse and len(conseqs) < min_seq:", "= getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat) dir_name, __ = os.path.split(pass_handle.name) if out_args[\"out_name\"] is", "output Arguments: clones (list): receptor objects within the same clone. sequences (list): sequences", "germline sequence of the first receptor in clones, the length of the first", "1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count =", "(list) : IMGT numbering of clonal positions . 
\"\"\" # bootstrap these data", "%s\" % (seq1, seq2)) dist = 0 for i in range(0,len(seq1)): if seq1[i]", "any gaps not divisible by three curgap = 0 for i in ndotgaps:", "= md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if append is not None: if", "+ concatenated_seq[counter] counter += 1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq", "+ r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask =", "[80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if", "< ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki # loop through list of", "correctseqs: maxlen = sites maximgt = len(imgtar) for j in range(0,len(sequences)): if len(sequences[j])", "tally = 0 for j in range(0, nseqs): if sequences[j][i:(i + 3)] !=", "changeo.Defaults import default_format from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment", "which point #we have to shift the frame. 
This attempts to correct for", "logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] += 1", "keys: if useqs[k] in join: rfrom = receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount +=", "parser = ArgumentParser(description=__doc__, epilog=fields, parents=[parser_parent], formatter_class=CommonHelpFormatter, add_help=False) group = parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\",", "% clones[0].clone) with open(partfile, \"w\") as partf: partf.write(\"%d %d\\n\" % (2, len(newgerm))) partf.write(\"FWR:IMGT\\n\")", "printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING %d\" % ins, debug) mout = maskSplitCodons(receptor, recursive=True)", "log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos) else: log[\"IN-FRAME\"] = str(spos) elif", "nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons split by alignment to IMGT reference,", "receptor object for a particular sequence. clones (list): list of receptors. cloneseqs (list):", "outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence", "+ 3)] != \"...\": tally += 1 tallies.append(tally) newseqs = [] # remove", "by IMGT definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
recursive (bool) : was this", "Seq from functools import partial # Presto and changeo imports from presto.Defaults import", "debug: receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts += 1 printDebug(\"FRAMESHIFT of length", "len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences found that match specified criteria.\",1) if", "None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count", "print debugging statements. \"\"\" frameshifts = 0 for ins in range(1, 3): ros", "\"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\",", "a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. oqpos (int) : position of interest", "round(float(vals[i]),2) printLog(log) if clean != \"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"]", "found, something\"s up. log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this was due to", "with --oformat txt will delete all tree file results.\\n\" \"You'll have to do", "printMessage, printWarning, printError, printDebug from changeo.Defaults import default_format from changeo.IO import splitName, getDbFields,", "1 else: init_clone_sizes[r.clone] = 1 for r in all_records: if target_clones is None", "sequences outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt) if", "the #first codon in the IMGT seq, other times it will be legitimately", "clone. lineages successfully processed records. lineages-fail database records failed processing. 
igphyml-pass parameter estimates", "required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\")", "codons split by alignment to IMGT reference, then produces input files for IgPhyML", "ris[(pisite + 3):] # Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug)", "nucleotides downstream of offending codon receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite +", "Output file name. igphymlout (str): igphyml output file nproc (int): Number of threads", "\".\": if seq1[i] != seq2[i]: dist += 1 if fbreak: break return dist", "sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent parser", "preferences. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. Returns: 0: returns 0 if an", "i in range(0,len(keys)-1): for j in range(i+1,len(keys)): ki = keys[i] kj = keys[j]", "[\"GERM\"] if meta_data is not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\") clonef.write(\">%s_%s\\n\" %", "# Note: Collapse can give misleading dupcount information if some sequences have ambiguous", "= [] found_no_funct = False for r in records: if r.functional is None:", "\"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\", igphyml_out] if asr >=", "nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\"", "qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos += 1 if", "printDebug(\"But codon was apparently preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"]", "not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log = 
OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name)", "by looking at the next codon over in the #alignment if scodons[i][2:3] !=", "clone. \"\"\" keys = list(useqs.keys()) join = {} # id -> sequence id", "meta_data is not None: seq, cid = seq_f.split(delim) cid = delim + cid.replace(\":\",", "conseqs = [] for j in range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec", ">= s_end: printDebug(\"Masked %s at position %d, at end of subject sequence\" %", "have ambiguous characters at polymorphic sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False,", "i return -1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3 from all sequences and", "files for IgPhyML output Arguments: clones (list): receptor objects within the same clone.", "parser parser_parent = getCommonArgParser(out_file=False, log=True, format=True) # Define argument parser parser = ArgumentParser(description=__doc__,", "len(qi), 3)] frameshifts = 0 s_end = 0 #adjust for the fact that", "fail_handle is not None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is not None:", "== \"..\" or scodons[-1] == \".\": scodons[-1] = \"...\" else: scodons[-1] = \"NNN\"", "list: deduplicated receptors within a clone. \"\"\" keys = list(useqs.keys()) join = {}", "out_args[\"out_name\"] is None: __, clone_name, __ = splitName(db_file) else: clone_name = out_args[\"out_name\"] if", "(list): list of Receptor objects. 
meta_data (str): Field to append to sequence IDs.", "mask split codons.\"\"\") group.add_argument(\"--md\", nargs=\"+\", action=\"store\", dest=\"meta_data\", help=\"\"\"List of fields to containing metadata", "This attempts to correct for this by looking at the next codon over", "in input sequence s_end (int): end of IMGT sequence qpos (int): starting position", "germline = clones[i].getField(\"germline_imgt_d_mask\") nseq = [] nimgtar = [] ngermline = [] ncdr3", "def hasPTC(sequence): \"\"\" Determines whether a PTC exits in a sequence Arguments: sequence", "same, move both forward spos += 1 qpos += 1 elif qcodons[qpos] ==", "import default_format from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import", "clean printLog(log) todelete = open(outrep) for line in todelete: line = line.rstrip(\"\\n\") line", "\"N\" + qi spos = i break else: spos = i break return", "sequence ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for", "seq_f cid = \"\" if meta_data is not None: seq, cid = seq_f.split(delim)", "receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"] = concatenated_seq return concatenated_seq, log def unAmbigDist(seq1, seq2,", "estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters", "str(ncdr3)) useqs_f = OrderedDict() conseqs = [] for j in range(0, nseqs): conseq", "= {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0,", "useqs[ki] joinseqs[kj] = ki # loop through list of joined sequences and collapse", "3): ros = receptor.sequence_input ris = receptor.sequence_imgt psite = receptor.v_seq_start - 1 +", "return dist def 
deduplicate(useqs, receptors, log=None, meta_data=None, delim=\":\"): \"\"\" Collapses identical sequences Argument:", "IgPhyML (--omega) kappa (str): kappa optimization in IgPhyML (-t) motifs (str): motifs to", "(str): motifs to use in IgPhyML (--motifs) hotness (str): motif in IgPhyML (--hotness)", "into clones.\") if r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count,", "useqs (dict): unique sequences within a clone. maps sequence to index in Receptor", "kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] = hotness log[\"NPROC\"] = nproc", "clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data):", "< len(ros) and (pisite + 3) < len(ris): #cut out 1 or 2", "seq2[i] != \"-\" and seq2[i] != \".\": if seq1[i] != seq2[i]: dist +=", "# print(\"\\n\" + str(clones[i].sequence_id)) # print(\"\\n \" + str((sequences[i])) ) # print(\"\\n\" +", "log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep) for line in", "only estimate GY94 trees and parameters format (str): input and output format. out_args", "= ncounti ambigchar[useqs[kj]] = ncountj # this algorithm depends on the fact that", "input sequence. spos (int) : starting position of IMGT sequence in input sequence.", "is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True all_records.append(r) if r.clone in", "if r.clone is None: printError(\"Cannot export datasets until sequences are clustered into clones.\")", "IgPhyML, if specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call, j_call,", "sequence qcodons (list): list of codons in input sequence spos (int): starting position", "# distance from the sequence they will be collapse to. 
if ncountj >", "clone_name else: clone_dir = os.path.join(dir_name, clone_name) if not os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options", "r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] +", "sequence qi = qi[(receptor.v_seq_start - 1):] #tally where --- gaps are in IMGT", "regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt)", "cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too small,", "= estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"-t\", action=\"store\", dest=\"kappa\", type=str, default=\"e\",", "and scodons[spos] != qcodons[qpos]: printDebug(\"Checking \" + scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos", "starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output,", "due to a frame-shift by repeating this method but with an edited input", "not the same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for", "else: clone_name = out_args[\"out_name\"] if dir_name is None: clone_dir = clone_name else: clone_dir", "clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"] if meta_data is not None:", "whether a PTC exits in a sequence Arguments: sequence (str): IMGT gapped sequence", "(\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"]", "printDebug(\"Masked %s at position %d, but couldn't find upstream match\" % (scodons[ospos], ospos),", "-len(cloneseqs[str(k)]) 
else: clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq)", "arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before exporting to fasta.\"\"\")", "IgPhyML output Arguments: out_dir (str): directory for sequence files. useqs_f (dict): unique sequences", "log[\"SEQ_MASKED\"] = receptor.sequence_imgt return receptor.sequence_imgt, log else: curgap = 0 si = nsi", "contains log information for each sequence. fails (dict): counts of various sequence processing", "r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] +", "printDebug(\"RUNNING %d\" % ins, debug) mout = maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug:", "clone (index is value in useqs dict). log (collections.OrderedDict): log of sequence errors.", "dest=\"kappa\", type=str, default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e = estimate, ce", "printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end += 1 qpos = 0 if", "sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) else:", "outputted data? 
if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs, hotness=hotness,", "collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too small, size is", "\"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR respectively: e = estimate,", "mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e = estimate,", "collapse: log[\"DUPLICATE\"] = fail_count - fails[\"seq_fail\"] log[\"END\"] = \"BuildTrees\" printLog(log) #Run IgPhyML on", "3)] != \"...\": tally += 1 tallies.append(tally) newseqs = [] # remove gap", "gap, move forward in imgt spos += 1 elif scodons[spos] == qcodons[qpos]: #", "= regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq", "sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key + \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" %", "else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence columns not detected.\\n! Cannot run CDR/FWR", "str(spos) else: printDebug(\"Masked %s at position %d, but couldn't find upstream match\" %", "+ cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\")))", "nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\",", "fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count # End", "gapped sequence in frame 1. 
Returns: int: negative if not PTCs, position of", "log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" + rto.sequence_id del useqs[k] return", "was apparently preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\"", "tree estimation\" printLog(log) try: #check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found", "- len(big_enough) if len(big_enough) == 0: printError(\"\\n\\nNo sequences found that match specified criteria.\",1)", "sequences. logs (dict): contains log information for each sequence. fails (dict): counts of", "subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to append to", "all_records: if target_clones is None or r.clone in target_clones: if init_clone_sizes[r.clone] >= min_seq:", "= r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"]", ": modified germline of clonal lineage. conseqs (list) : consensus sequences. 
duplicate (bool)", "r.sequence_input log[\"SEQ_IMGT\"] = r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR", "format for IgPhyML (tab or txt) nohlp (bool): If True, only estimate GY94", "(bool): if False, do not attempt to mask split codons sample_depth (int): depth", "scodons: concatenated_seq += i # add --- gaps back to IMGT sequence ncon_seq", "\"\" for i in range(0, sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)]", "qpos, log, debug, recursive) if not log[\"PASS\"] and not recursive: log[\"FRAMESHIFTS\"] = frameshifts", "r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] += 1", "remove them for now gaps = [] ndotgaps = [] nsi = \"\"", "lineages-fail database records failed processing. igphyml-pass parameter estimates and lineage trees from running", "= delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\",", "getDbFields, getFormatOperators, getOutputHandle, getOutputName from changeo.Alignment import RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs,", "Seq(\"\") for i in scodons: concatenated_seq += i # add --- gaps back", "printDebug(\"FRAMESHIFT of length %d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break", "out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out = getOutputName(db_file, out_label=\"igphyml-pass\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=oformat)", "position of interest in input sequence. ospos (int) : position of interest in", "% 3 != 0: printError(\"number of sites must be divisible by 3! len:", "of codons in input sequence spos (int): starting position of IMGT sequence in", "sequence 2 fbreak (bool): break after first difference found? 
Returns: int: number of", "len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else:", "on outputted data? if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa, motifs=motifs,", "nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", \"ce\",", "= False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is", "for i in range(0, sites, 3): if tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i])", "and seq1[i] != \"-\" and seq1[i] != \".\": if seq2[i] != \"N\" and", "spos def checkFrameShifts(receptor, oqpos, ospos, log, debug): \"\"\" Checks whether a frameshift occured", "None: ski = keys[i] skj = keys[j] else: ski, cid = keys[i].split(delim) skj,", "% ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros", "(str): Optimize combination of topology (t) branch lengths (l) and parameters (r) in", "and collapse keys = list(useqs.keys()) for k in keys: if useqs[k] in join:", "CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\" Find and mask split", "type=str, default=\"none\", help=\"\"\"Delete intermediate files? none: leave all intermediate files; all: delete all", "PTC if found. \"\"\" ptcs = (\"TAA\", \"TGA\", \"TAG\", \"TRA\", \"TRG\", \"TAR\", \"TGR\",", "number of ACGT differences. 
\"\"\" if len(seq1) != len(seq2): printError(\"Sequences are not the", "log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\" def maskSplitCodons(receptor, recursive=False,", "of sequences will be excluded.\"\"\") group.add_argument(\"--sample\", action=\"store\", dest=\"sample_depth\", type=int, default=-1, help=\"\"\"Depth of reads", "print('error>', e.output, '<') printError(\"HLP tree building failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out", "of topology (t) branch lengths (l) and parameters (r) in IgPhyML. omega (str):", "j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j]) else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i]", "meta_data=None, collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment and", "that all sequences are compared pairwise, and all are zero # distance from", "= {} #sequence id -> number ATCG nucleotides for i in range(0,len(keys)-1): for", "0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log", "if log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log = OrderedDict()", "that match specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total = 0 for", "observed data newgerm = [] imgt = [] for j in range(0, nseqs):", "nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45] *", "interval (0-1).\"\"\") return parser if __name__ == \"__main__\": \"\"\" Parses command line arguments", "dir_name is None: clone_dir = clone_name else: clone_dir = os.path.join(dir_name, clone_name) if not", "= False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"]", "-> 
sequence id to join with (least ambiguous chars) joinseqs = {} #", "if pass_handle is not None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"]", "this function called recursively? \"\"\" frameshifts = 0 while spos < s_end and", "True remove all CDR3s. nmask (bool): if False, do not attempt to mask", "True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"] = k log[rfrom.sequence_id][\"FAIL\"] = \"Collapsed with \" +", "if fail_handle is not None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is not", "sequence id to join with (least ambiguous chars) joinseqs = {} # id", "sequences. delim (str) : delimiter for extracting metadata from ID. newgerm (str) :", "spos += 1 qpos += 1 elif qcodons[qpos] == \"N\": # possible that", "if log is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] =", "match specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total = 0 for r", "(list): list of sequences in clones. clones (list): list of Receptor objects. \"\"\"", "with different meta_data. meta_data (str): Field to append to sequence IDs. Splits identical", "if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(len(scodons)) else: log[\"END-MASKED\"]", "fail_writer (changeo.IO.TSVWriter): failed sequences writer object. Returns: 0: returns 0 if an error", "for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\", key,\"_\".join(germ_id), partfile)) handle.close()", "is not None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is not None: log_handle.close()", "meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid", "indexes with clones parameter). 
meta_data (str): Field to append to sequence IDs. Splits", "if False, do not attempt to mask split codons sample_depth (int): depth of", "(changeo.IO.TSVWriter): failed sequences writer object. Returns: 0: returns 0 if an error occurs", "= r.sequence_input logs[r.sequence_id] = log if out_args[\"failed\"]: fail_writer.writeReceptor(r) fails[\"seq_fail\"] += 1 fails[\"nf_fail\"] +=", "sequence files. useqs_f (dict): unique sequences mapped to ids. meta_data (str): Field to", "= ki # loop through list of joined sequences and collapse keys =", "it's possible to start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons = [qi[i:i", "in records: if r.functional is None: r.functional = True if found_no_funct is False:", "try: #get GY94 starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\"", "except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output, '<') printError(\"GY94 tree building in", "= r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"]", "IgPhyML failed\") log = OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization", "of topology (t) branch lengths (l) and parameters (r), or nothing (n), for", "IMGT sequence and remove them for now gaps = [] ndotgaps = []", "return -len(conseqs) # Output fasta file of masked, concatenated sequences outputSeqPartFiles(out_dir, useqs_f, meta_data,", "just want to mask the #first codon in the IMGT seq, other times", "out_dir (str): directory for sequence files. useqs_f (dict): unique sequences mapped to ids.", "the same within clone %d!\\n\" % c.clone,False) printError(c.getField(\"imgtpartlabels\"),False) printError(\"%s\\n%d\\n\" % (imgtar,j),False) for j", "len( regions[\"fwr2_imgt\"]) + \\ [60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\", "recursively? 
\"\"\" frameshifts = 0 while spos < s_end and qpos < len(qcodons):", "--- gaps are in IMGT sequence and remove them for now gaps =", "if clonesizes[str(k)] > 0: nclones += 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -=", "%d, clone: %s , id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar, germline,", "meta_data. meta_data (str): Field to append to sequence IDs. Splits identical sequences with", "Splits identical sequences with different meta_data. delim (str): delimited to use when appending", "collapse=False, ncdr3=False, logs=None, fail_writer=None, out_dir=None, min_seq=1): \"\"\" Create intermediate sequence alignment and partition", "and output format. out_args (dict): arguments for output preferences. Returns: dict: dictionary of", "clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)] fails[\"minseq_fail\"] -= clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count", "None: __, clone_name, __ = splitName(db_file) else: clone_name = out_args[\"out_name\"] if dir_name is", "to index in Receptor list. receptors (dict): receptors within a clone (index is", "be collapse to. if ncountj > ncounti: nci = 0 if useqs[ki] in", "help=\"\"\"Minimum number of data sequences. Any clones with fewer than the specified number", "# Format options try: reader, writer, __ = getFormatOperators(format) except ValueError: printError(\"Invalid format", "% (seq1, seq2)) dist = 0 for i in range(0,len(seq1)): if seq1[i] !=", "ngermline = [] ncdr3 = 0 #print(\"imgtarlen: \" + str(len(imgtar))) #print(\"seqlen: \" +", "\"N\" and seq2[i] != \"-\" and seq2[i] != \".\": if seq1[i] != seq2[i]:", "output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1, help=\"\"\"Minimum number of data sequences.", "(str): Field to append to sequence IDs. 
Splits identical sequences with different meta_data.", "out_args=default_out_args): \"\"\" Masks codons split by alignment to IMGT reference, then produces input", "sequences in clones with only 1 sequence? imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences,", "fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0,", "= useqs[ki] joinseqs[kj] = ki # loop through list of joined sequences and", "ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0: log = OrderedDict()", "fact that it's possible to start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug) qcodons", "FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt = ris return frameshifts def", "type=int, default=-1, help=\"\"\"Depth of reads to be subsampled (before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\",", "fails[\"del_fail\"] += 1 elif mout[1][\"FAIL\"] == \"SINGLE FRAME-SHIFTING INSERTION\": fails[\"in_fail\"] += 1 else:", "% key) if clonesizes[key] > 0: germ_id = [\"GERM\"] if meta_data is not", "\"TGR\", \"TRR\") for i in range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs: return", "#if no match for the adjacent codon was found, something\"s up. log[\"FAIL\"] =", "= open(outrep) for line in todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit", "IMGT sequence in input sequence). \"\"\" spos = 0 for i in range(0,", "RegionDefinition(r.junction_length, amino_acid=False) regions = rd.getRegions(r.sequence_imgt) if regions[\"cdr3_imgt\"] is not \"\" and regions[\"cdr3_imgt\"] is", "not None: seq, cid = seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\") sid", "IMGT gapped sequence in frame 1. Returns: int: negative if not PTCs, position", "objects within the same clone. 
sequences (list): sequences within the same clone (share", "meta_data=meta_data, collapse=collapse, ncdr3=ncdr3, logs=logs, fail_writer=fail_writer, out_dir=clone_dir, min_seq=min_seq) #If clone is too small, size", "== 1: ncon_seq = ncon_seq + \".\" elif i == 0: ncon_seq =", "format for IgPhyML (tab or txt) clean (str): delete intermediate files? (none, all)", "files? (none, all) nohlp (bool): If True, only estimate GY94 trees and parameters", "(sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs == 1 and duplicate: if meta_data is not", "% r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]:", "deduplicate(useqs_f, clones, logs, meta_data, delim) if collapse and len(useqs_f) < min_seq: for seq_f,", "import partial # Presto and changeo imports from presto.Defaults import default_out_args from presto.IO", "reader(handle) fail_handle, fail_writer = None, None if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"],", "sequence to index in Receptor list. receptors (dict): receptors within a clone (index", "= 0 if mask: findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive)", "found_no_funct = False for r in records: if r.functional is None: r.functional =", "if correctseqs: maxlen = sites maximgt = len(imgtar) for j in range(0,len(sequences)): if", "dist += 1 if fbreak: break return dist def deduplicate(useqs, receptors, log=None, meta_data=None,", "clones (list): list of Receptor objects. 
meta_data (str): Field to append to sequence", "len(meta_data)) if dist == 0 and m_match: ncounti = ki.count(\"A\") + ki.count(\"T\") +", "cilog = outrep + \"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir)", "sequences and germline of a clone Arguments: sequences (list): list of sequences in", "OrderedDict from textwrap import dedent from time import time from Bio.Seq import Seq", "for i in scodons: concatenated_seq += i # add --- gaps back to", "len(useqs_f) == 1 and duplicate: if meta_data is not None: if meta_data[0] ==", "if total == sample_depth: break # Start processing clones clonesizes = {} pass_count,", "\\ regions[\"fwr3_imgt\"] + regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt", "= seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid", "small: \" + str(len(useqs_f)) logs[clones[num].sequence_id][\"PASS\"] = False return -len(useqs_f) elif not collapse and", "ospos, log, debug) elif spos >= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] =", "the first sequence in clones, and the number of sequences in clones. 
\"\"\"", "(seq1, seq2)) dist = 0 for i in range(0,len(seq1)): if seq1[i] != \"N\"", "1 fails[\"nf_fail\"] += 1 return 0 # Run IgPhyML on outputed data def", "logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads += r.dupcount if r.clone in clones: clones[r.clone].append(r)", "and not recursive: log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3: if scodons[-1] ==", "for j in range(0, nseqs): cid = \"\" if meta_data is not None:", "print(receptor.sequence_id) # adjust starting position of query sequence qi = qi[(receptor.v_seq_start - 1):]", "choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\", \"ce,ce\"), help=\"\"\"Omega parameters to estimate for", "names = igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2)", "clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks codons split by alignment", "scodons[i][2] #sometimes IMGT will just cut off first letter if non-match, at which", "1 return 0 else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence columns not detected.\\n!", "i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end += 1 qpos = 0", "scodons[spos] == \"...\": #possible next codon is just a gap spos += 1", "#first codon in the IMGT seq, other times it will be legitimately absent", "ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] =", "ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove", "printError, printDebug from changeo.Defaults import default_format from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle,", "def getArgParser(): \"\"\" Defines the ArgumentParser Returns: argparse.ArgumentParser: argument parsers. 
\"\"\" # Define", "= [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout] hlp_args", "= keys[j] else: ski, cid = keys[i].split(delim) skj, cid = keys[j].split(delim) ri =", "break else: spos = i break return qi, spos def checkFrameShifts(receptor, oqpos, ospos,", "len(imgtar) for j in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if", "(int) : position of interest in input sequence. ospos (int) : position of", "counts of various sequence processing failures. out_args (dict): arguments for output preferences. fail_writer", "OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\": igf = open(igphyml_out) names =", "len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if scodons[i] != \"...\": if scodons[i][0:2]", "(list): list of Receptor objects. \"\"\" for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\")", "clones. \"\"\" sites = len(sequences[0]) nseqs = len(sequences) imgtar = clones[0].getField(\"imgtpartlabels\") germline =", "regions[\"fwr2_imgt\"] + regions[\"cdr2_imgt\"] + \\ regions[\"fwr3_imgt\"] nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)] if len(simgt) < len(r.sequence_imgt):", "+ \\ [60] * len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\ [108] *", "else: for j in range(0, nseqs): cid = \"\" if meta_data is not", "Converts TSV files into IgPhyML input files \"\"\" # Info __author__ = \"<NAME>\"", "germline of clonal lineage. conseqs (list) : consensus sequences. duplicate (bool) : duplicate", "si = nsi scodons = [si[i:i + 3] for i in range(0, len(si),", "different meta_data collapse (bool): if True collapse identical sequences. 
ncdr3 (bool): if True", "to parallelize IgPhyML across optimization (str): Optimize combination of topology (t) branch lengths", "of length %d!\" % ins, debug) log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else:", "scodons (list): list of codons in IMGT sequence. qi (str) : input sequence.", "m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable)", "of sequence errors. meta_data (str): Field to append to sequence IDs. Splits identical", "Call main for each input file for f in args.__dict__[\"db_files\"]: args_dict[\"db_file\"] = f", "and scan forward until you find a codon that matches next site if", "log, debug) elif spos >= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\"", "estimate GY94 trees and parameters clean (str): delete intermediate files? (none, all) \"\"\"", "nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data is not None:", "+= 1 fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"] is not \"\" and", "input sequence if not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug) elif", "of various sequence processing failures. out_args (dict): arguments for output preferences. fail_writer (changeo.IO.TSVWriter):", "of sequences per clone append (str): column name to append to sequence_id igphyml", "ambiguous chars) ambigchar = {} #sequence id -> number ATCG nucleotides for i", "= {} logs = OrderedDict() fails = {\"rec_count\":0, \"seq_fail\":0, \"nf_fail\":0, \"del_fail\":0, \"in_fail\":0, \"minseq_fail\":0,", "delim + \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"),", "(bool) : was this method part of a recursive call? 
mask (bool) :", "keys[j] if meta_data is None: ski = keys[i] skj = keys[j] else: ski,", "\"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end, len(scodons), scodons[s_end]), debug) s_end += 1", "#printProgress(rec_count, rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >=", "= getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out", "Returns: dict: dictionary of output pass and fail files. \"\"\" # Print parameter", "among sequences in the same clone.\", \"Be sure to cluster sequences into clones", "len: %d, clone: %s , id: %s, seq: %s\" %(len(sequences[0]),\\ clones[0].clone,clones[0].sequence_id,sequences[0])) return imgtar,", "a clone. \"\"\" keys = list(useqs.keys()) join = {} # id -> sequence", "meta_data is not None: if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\"", "#find any gaps not divisible by three curgap = 0 for i in", "= \"\" counter = 0 for i in gaps: #print(str(i) + \":\" +", "= r.sequence_id + \"_\" + r.getField(m) total += maskCodonsLoop(r, clones, cloneseqs, logs, fails,", "False for r in records: if r.functional is None: r.functional = True if", "in clones: clones[r.clone].append(r) cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return 1", "\"_\" duplicate = True # duplicate sequences in clones with only 1 sequence?", "1 seq2 (str): sequence 2 fbreak (bool): break after first difference found? Returns:", "log[\"FRAMESHIFTS\"] = frameshifts if len(scodons[-1]) != 3: if scodons[-1] == \"..\" or scodons[-1]", "sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for i in range(0,", "conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data is not None: meta_data_list", "for output preferences. Returns: dict: dictionary of output pass and fail files. 
\"\"\"", "\"ce,ce\"), help=\"\"\"Omega parameters to estimate for FWR,CDR respectively: e = estimate, ce =", "sequences. nseqs (int): number of sequences. delim (str) : delimiter for extracting metadata", "apparently preserved\", debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" +", "elif qpos >= len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] =", "OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is not None else None log[\"RECORDS\"] =", "r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq) if", "ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\",", ">= 0: printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] =", "scodons, qcodons, spos, s_end, qpos, log, debug, recursive) if not log[\"PASS\"] and not", "for r in records: if r.functional is None: r.functional = True if found_no_funct", "hasPTC(sequence): \"\"\" Determines whether a PTC exits in a sequence Arguments: sequence (str):", "to use in IgPhyML (--motifs) hotness (str): motif in IgPhyML (--hotness) oformat (str):", "seq, cid = seq_f.split(delim) cid = delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable)", "debugging statements? recursive (bool): was this function called recursively? \"\"\" frameshifts = 0", "len(sequence), 3): if sequence[i:(i+3)] in ptcs: return i return -1 def rmCDR3(sequences, clones):", "sequence 1 seq2 (str): sequence 2 fbreak (bool): break after first difference found?", ": input sequence. 
spos (int) : starting position of IMGT sequence in input", "or simgt != r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone", "if conseq_f in useqs_f and collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"]", "debug): \"\"\" Find and mask split codons Arguments: scodons (list): list of codons", "objects. meta_data (str): Field to append to sequence IDs. Splits identical sequences with", "clonal positions . \"\"\" # bootstrap these data if desired lg = len(newgerm)", "\" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] =", "if len(simgt) < len(r.sequence_imgt): simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\", "confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\",", "help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser if __name__ == \"__main__\": \"\"\" Parses", "is not \"\" and regions[\"cdr3_imgt\"] is not None: simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"]", "spos (int) : starting position of IMGT sequence in input sequence. 
debug (bool)", "help=\"\"\"List of clone IDs to output, if specified.\"\"\") group.add_argument(\"--minseq\", action=\"store\", dest=\"min_seq\", type=int, default=1,", "scodons[spos]+ \"\\t\" + qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1] == scodons[ospos]: #if", "intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t)", "print(\"checking %s at position %d %d\" % (scodons[spos], spos, qpos)) ospos=spos oqpos=qpos spos", "os.path.exists(clone_dir): os.makedirs(clone_dir) # Format options try: reader, writer, __ = getFormatOperators(format) except ValueError:", "clones.\") if r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count,", "\"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] +", "* len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45] * len( regions[\"fwr2_imgt\"]) + \\", "preferences. Returns: dict: dictionary of output pass and fail files. 
\"\"\" # Print", "\"w\") as clonef: if collapse: for seq_f, num in useqs_f.items(): seq = seq_f", "\" + str((sequences[i])) ) # print(\"\\n\" + str((germline))) for j in range(0,len(imgtar)): if", "else: ncdr3 += 1 clones[i].setField(\"imgtpartlabels\",nimgtar) clones[i].setField(\"germline_imgt_d_mask\", \"\".join(ngermline)) sequences[i] = \"\".join(nseq) #print(\"Length: \" +", "len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt: imgtar = clones[j].getField(\"imgtpartlabels\") maximgt = len(imgtar) sites =", "log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at position %d, but couldn't find upstream", "ncdr3 += 1 newseqs[i] = nseq newgerm = ngerm imgt = nimgt #print(\"Length:", "is not None: for m in append: r.sequence_id = r.sequence_id + \"_\" +", "printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"]", "r (changeo.Receptor.Receptor): receptor object for a particular sequence. clones (list): list of receptors.", "clones clonesizes = {} pass_count, nclones = 0, 0 printMessage(\"Processing clones\", start_time=start_time, width=50)", "choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run HLP model?\"\"\") igphyml_group.add_argument(\"--asr\",", "found_no_funct is False: printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\")", "str(spos) else: log[\"IN-FRAME\"] = str(spos) elif qpos >= len(qcodons) and spos < s_end:", "#remove colons from sequence ID r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID", "md = md.replace(\":\",\"-\") #remove colons from metadata md = md.replace(\",\",\"-\") #remove commas from", "collapse identical sequences before exporting to fasta.\"\"\") group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", 
help=\"\"\"If specified, remove", "clonesizes[str(k)] > 0: nclones += 1 pass_count += clonesizes[str(k)] else: fails[\"seq_fail\"] -= clonesizes[str(k)]", "import __version__, __date__ # Imports import os import random import subprocess import multiprocessing", "default=\"e\", choices=(\"e\", \"ce\"), help=\"\"\"Kappa parameters to estimate: e = estimate, ce = estimate", "= maskSplitCodons(r, mask=mask) mask_seq = mout[0] ptcs = hasPTC(mask_seq) if ptcs >= 0:", "not recursive: frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug) elif spos >= s_end", "def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt): \"\"\"", "fail_handle.name fail_handle.close() if log_handle is not None: log_handle.close() #printProgress(rec_count, rec_count, 0.05, start_time) log", "1 fails[\"region_fail\"] += 1 return 0 elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"]", "+ str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline) < len(sequences[i]): # print(\"\\n\" +", "cid = \"\" if meta_data is not None: seq, cid = seq_f.split(delim) cid", "clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace(\"_\", \"\")) meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f =", "(list): list of receptors. cloneseqs (list): list of masked clone sequences. logs (dict):", "nsi scodons = [si[i:i + 3] for i in range(0, len(si), 3)] #", "maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region by IMGT definition. 
Arguments: receptor (changeo.Receptor.Receptor):", "newgerm[-1] + \"N\" elif len(lcodon) == 1: newgerm[-1] = newgerm[-1] + \"NN\" if", "python3 \"\"\" Converts TSV files into IgPhyML input files \"\"\" # Info __author__", "scodons[spos] == qcodons[qpos]: # if both are the same, move both forward spos", "parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as e: print(\" \".join(hlp_args)) print('error>', e.output, '<')", "imgtar = clones[0].getField(\"imgtpartlabels\") germline = clones[0].getField(\"germline_imgt_d_mask\") if germline is \"\": germline = clones[0].getField(\"germline_imgt\")", "curgap = 0 si = nsi scodons = [si[i:i + 3] for i", "was preserved qpos -= 1 spos = ospos printDebug(\"But codon was apparently preserved\",", "conseqs, duplicate, imgt) if collapse: return len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones,", "(str): delimited to use when appending meta_data. Returns: list: deduplicated receptors within a", "if out_args[\"failed\"]: fail_handle = getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields)", "extracting metadata from ID. newgerm (str) : modified germline of clonal lineage. conseqs", "Arguments: sequence (str): IMGT gapped sequence in frame 1. 
Returns: int: negative if", "todelete: line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) ==", "return len(useqs_f) else: return nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer,", "= \"CLEANING\" log[\"SCOPE\"] = clean printLog(log) todelete = open(outrep) for line in todelete:", "* len(regions[\"cdr2_imgt\"]) + [80] * len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels)", "* len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt) or simgt != r.sequence_imgt: log", "ArgumentParser Returns: argparse.ArgumentParser: argument parsers. \"\"\" # Define input and output field help", "# deal with the fact that it's possible to start mid-codon qi,spos =", "running IgPhyML, if specified required fields: sequence_id, sequence, sequence_alignment, germline_alignment_d_mask or germline_alignment, v_call,", "lg = len(newgerm) sites = range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile", "\"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0", "= [] ndotgaps = [] nsi = \"\" for i in range(0,len(si)): if", "spos += 1 qpos += 1 while spos < s_end and scodons[spos] ==", "= {} big_enough = [] all_records = [] found_no_funct = False for r", "s = \"\" delim = \"_\" duplicate = True # duplicate sequences in", "the same, mask IMGT at that site and scan forward until you find", "#print(key + \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile =", "buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\",", "if collapse: useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim) if 
collapse and len(useqs_f)", "# Open output files out_label = \"lineages\" pass_handle = getOutputHandle(db_file, out_label=out_label, out_dir=out_args[\"out_dir\"], out_name=", "sequences within a clone. maps sequence to index in Receptor list. receptors (dict):", "with different meta_data. delim (str): delimited to use when appending meta_data. Returns: list:", "chars) ambigchar = {} #sequence id -> number ATCG nucleotides for i in", "qcodons[qpos] != \"...\": #if IMGT gap, move forward in imgt spos += 1", "== scodons[spos]: printDebug(\"Masked %s at position %d\" % (scodons[ospos], ospos), debug) scodons[ospos] =", "chars) joinseqs = {} # id -> useq to join with (least ambiguous", "and (pisite + 3) < len(ris): #cut out 1 or 2 nucleotides downstream", "regions[\"cdr3_imgt\"] + regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) -", "on this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r,", "cloneseqs[r.clone].append(mask_seq) else: clones[r.clone] = [r] cloneseqs[r.clone] = [mask_seq] return 1 else: if out_args[\"failed\"]:", "\"\\t\" + qcodons[qpos], debug) qpos += 1 if qcodons[qpos-1] == scodons[ospos]: #if codon", "!= 3: if scodons[-1] == \"..\" or scodons[-1] == \".\": scodons[-1] = \"...\"", "out_label=out_label, out_dir=out_args[\"out_dir\"], out_name= out_args[\"out_name\"], out_type=\"tsv\") igphyml_out = None if igphyml: igphyml_out = getOutputName(db_file,", "FWR/CDR sequence columns not detected.\\n! 
Cannot run CDR/FWR partitioned model on this data.\\n\"", "+ \"0\" sid = clones[num].sequence_id.translate(transtable) + \"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\",", "clones[0].clone) with open(outfile, \"w\") as clonef: if collapse: for seq_f, num in useqs_f.items():", "for m in range(0,len(meta_data)): md = r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from", "from changeo import __version__, __date__ # Imports import os import random import subprocess", "else: spos = i break return qi, spos def checkFrameShifts(receptor, oqpos, ospos, log,", "for use with igphyml? Returns: str: modified IMGT gapped sequence. log: dict of", "False #if no match for the adjacent codon was found, something\"s up. log[\"FAIL\"]", "rec_count, 0.05, start_time) ptcs = hasPTC(r.sequence_imgt) gptcs = hasPTC(r.getField(\"germline_imgt_d_mask\")) if gptcs >= 0:", "to do that yourself.\") log = OrderedDict() log[\"END\"] = \"IgPhyML analysis\" printLog(log) #", "len(scodons[-1]) != 3: if scodons[-1] == \"..\" or scodons[-1] == \".\": scodons[-1] =", "+ \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"] + nseq imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) +", "elif i == 0: ncon_seq = ncon_seq + concatenated_seq[counter] counter += 1 ncon_seq", "sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1,", "clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar = meta_data[0].split(\",\") for c in clones:", "clonef.write(\"\\n\") #output partition file partfile = os.path.join(out_dir, \"%s.part.txt\" % clones[0].clone) with open(partfile, \"w\")", "times it will be legitimately absent from the query, at which point #we", "from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\", help=\"\"\"If specified, 
do not attempt to mask", "is not None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\") pass_handle.write(\"%s\\t%s\\t%s_%s\\t%s\\n\" % (outfile, \"N\",", "sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) ==", "if __name__ == \"__main__\": \"\"\" Parses command line arguments and calls main \"\"\"", "\"N\" elif len(lcodon) == 1: newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3: ngerm", "error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] += 1 return 0 elif", "0: if curgap % 3 != 0 : printDebug(\"Frame-shifting gap detected! Refusing to", "md.replace(\":\",\"-\") #remove colons from metadata md = md.replace(\",\",\"-\") #remove commas from metadata md", "if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in range(0, sites,", "log[\"FAIL\"] = \"SINGLE FRAME-SHIFTING INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt = ris", "in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for key in sorted(clonesizes, key=clonesizes.get, reverse=True): #print(key +", "#check for igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try: #get GY94", "if ri.getField(m) == rj.getField(m) and m != \"DUPCOUNT\": matches += 1 m_match =", "the fact that it's possible to start mid-codon qi,spos = correctMidCodonStart(scodons, qi, debug)", "curgap != 0: if curgap % 3 != 0 : printDebug(\"Frame-shifting gap detected!", "different meta_data target_clones (str): List of clone IDs to analyze. collapse (bool): if", "of receptors. cloneseqs (list): list of masked clone sequences. 
logs (dict): contains log", "in range(0,len(imgtar)): if imgtar[j] != 108: nseq.append(sequences[i][j]) if j < len(germline): ngermline.append(germline[j]) nimgtar.append(imgtar[j])", "3):] # Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug)", "IMGT sequence qpos (int): starting position of input sequence in IMGT sequence log", "ngermline is \"\": ngermline = c.getField(\"germline_imgt\") if ngermline != germline: resolveglines = True", "= \"<NAME>\" from changeo import __version__, __date__ # Imports import os import random", "not None: if meta_data[0] == \"DUPCOUNT\": cid = delim + \"0\" sid =", "a gap spos += 1 while qpos < len(qcodons) and spos < s_end", "objects. \"\"\" for i in range(0,len(sequences)): imgtar = clones[i].getField(\"imgtpartlabels\") germline = clones[i].getField(\"germline_imgt_d_mask\") nseq", "import time from Bio.Seq import Seq from functools import partial # Presto and", "when appending meta_data. Returns: list: deduplicated receptors within a clone. \"\"\" keys =", "successfully processed records. lineages-fail database records failed processing. igphyml-pass parameter estimates and lineage", "algorithm depends on the fact that all sequences are compared pairwise, and all", "clonal lineage. conseqs (list) : consensus sequences. 
duplicate (bool) : duplicate sequence if", "len(newgerm))) partf.write(\"FWR:IMGT\\n\") partf.write(\"CDR:IMGT\\n\") partf.write(\"%s\\n\" % (clones[0].v_call.split(\"*\")[0])) partf.write(\"%s\\n\" % (clones[0].j_call.split(\"*\")[0])) partf.write(\",\".join(map(str, imgt))) partf.write(\"\\n\") def", "fails[\"in_fail\"] += 1 else: fails[\"other_fail\"] += 1 else: log = OrderedDict() log[\"ID\"] =", "= [\"GERM\"] if meta_data is not None: for i in range(1, len(meta_data)): germ_id.append(\"GERM\")", "\"\"\" Find and mask split codons Arguments: scodons (list): list of codons in", "not found.\") found_no_funct = True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID", "only one in a clone. imgt (list) : IMGT numbering of clonal positions", "at end of subject sequence\" % (scodons[ospos], ospos), debug) if \"END-MASKED\" in log:", "+ str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"] = False log[\"FAIL\"] = \"UNKNOWN\"", "outputed data def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\",", "IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices = (\"e\", \"ce\", \"e,e\", \"ce,e\", \"e,ce\",", "[] ncdr3 = 0 for j in range(0, len(imgt)): if imgt[j] != 108:", "sequences with different meta_data. clones (list) : list of receptor objects. 
collapse (bool)", "= str(spos) elif qpos >= len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\", debug)", "(before deduplication).\"\"\") group.add_argument(\"--append\", nargs=\"+\", action=\"store\", dest=\"append\", help=\"\"\"List of columns to append to sequence", "is not None: log[rfrom.sequence_id][\"PASS\"] = False log[rfrom.sequence_id][\"DUPLICATE\"] = True log[rfrom.sequence_id][\"COLLAPSETO\"] = joinseqs[k] log[rfrom.sequence_id][\"COLLAPSEFROM\"]", "maskSplitCodons(receptor, recursive=True) if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts", "from metadata md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\")\",\"-\") #remove", "log[\"NONFUNCTIONAL\"] = fails[\"nf_fail\"] log[\"FRAMESHIFT_DEL\"] = fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"]", "\" + str(len(imgtar))) #print(\"seqlen: \" + str(len(sequences[i]))) #print(\"germline: \" + str(len(germline))) #if len(germline)", "of length four containing a list of IMGT positions for first sequence in", "sequence information \"\"\" debug = False qi = receptor.sequence_input si = receptor.sequence_imgt log", "= receptors[useqs[ki]] rj = receptors[useqs[kj]] dist = unAmbigDist(ski, skj, True) m_match = True", "newseqs[j]]) if meta_data is not None: meta_data_list = [] for m in range(0,len(meta_data)):", "action=\"store\", dest=\"nproc\", type=int, default=1, help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\",", "in range(0,len(sequences)): if len(sequences[j]) > maxlen: maxlen = len(sequences[j]) if len(clones[j].getField(\"imgtpartlabels\")) > maximgt:", "ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\") #remove parenthesis from sequence ID if(meta_data is not None):", "len(sequences)): printError(\"%s\\n%s\\n\" % 
(sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\") for j in", "mapped to ids. meta_data (str): Field to append to sequence IDs. Splits identical", "if meta_data is not None: meta_data_list = [] for m in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\",", "def maskSplitCodons(receptor, recursive=False, mask=True): \"\"\" Identify junction region by IMGT definition. Arguments: receptor", "dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology (t) branch lengths (l) and", "printWarning(\"FUNCTIONAL column not found.\") found_no_funct = True all_records.append(r) if r.clone in init_clone_sizes: init_clone_sizes[r.clone]", "Checks whether a frameshift occured in a sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object.", "cid = keys[i].split(delim) skj, cid = keys[j].split(delim) ri = receptors[useqs[ki]] rj = receptors[useqs[kj]]", "nseq = [] nimgtar = [] ngermline = [] ncdr3 = 0 #print(\"imgtarlen:", "collapse: clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" +", "nseqs def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True): \"\"\" Masks codons", "off first letter if non-match, at which point we\"ll just want to mask", "imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] = newgerm[-1] + \"N\" elif len(lcodon) ==", "include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] = \"FRAME-SHIFTING DELETION\" log[\"SEQ_IN\"] = receptor.sequence_input", "* len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) != len(r.sequence_imgt)", "sequence alignment and partition files for IgPhyML output Arguments: out_dir (str): directory for", "int: number of 
ACGT differences. \"\"\" if len(seq1) != len(seq2): printError(\"Sequences are not", "from presto.Defaults import default_out_args from presto.IO import printLog, printMessage, printWarning, printError, printDebug from", "receptors[useqs[k]] rto = receptors[join[useqs[k]]] rto.dupcount += rfrom.dupcount if log is not None: log[rfrom.sequence_id][\"PASS\"]", "qi, debug) qcodons = [qi[i:i + 3] for i in range(0, len(qi), 3)]", "ptcs >= 0: printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\" % r.sequence_id) mout[1][\"PASS\"]", "files. fail_writer (changeo.IO.TSVWriter): failed sequences writer object. min_seq (int): minimum number of data", "== qcodons[qpos]: # if both are the same, move both forward spos +=", "sequence. debug (bool) : print debugging statements. Returns: tuple: (modified input sequence, modified", "= ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] = useqs[ki] joinseqs[kj] = ki #", "log[\"END-MASKED\"] = str(spos) concatenated_seq = Seq(\"\") for i in scodons: concatenated_seq += i", "all intermediate files; all: delete all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\",", "output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of clone IDs", "+= 1 ncon_seq = ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input", "parenthesis from metadata md = md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if append", "sid = clones[j].sequence_id.translate(transtable)+\"_1\" + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) germ_id = [\"GERM\"]", "= igf.readline().split(\"\\t\") vals = igf.readline().split(\"\\t\") for i in range(3,len(names)-1): log[names[i]] = round(float(vals[i]),2) printLog(log)", "time() 
printMessage(\"Correcting frames and indels of sequences\", start_time=start_time, width=50) #subsampling loop init_clone_sizes =", "= 0 while spos < s_end and qpos < len(qcodons): if debug: print(scodons[spos]", "== \"__main__\": \"\"\" Parses command line arguments and calls main \"\"\" # Parse", "nmask (bool): if False, do not attempt to mask split codons sample_depth (int):", "i in range(0, len(sequence), 3): if sequence[i:(i+3)] in ptcs: return i return -1", "number of sequences per clone append (str): column name to append to sequence_id", "arguments (see igphyml -h for details)\") igphyml_group.add_argument(\"--igphyml\", action=\"store_true\", dest=\"igphyml\", help=\"\"\"Run IgPhyML on output?\"\"\")", "in meta_data: meta_data_list.append(clones[j].getField(m).replace(\":\", \"_\")) cid = delim + str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) +", "pass_handle.close() if fail_handle is not None: output[\"fail\"] = fail_handle.name fail_handle.close() if log_handle is", "# Debug sequence modifications printDebug(ros, debug) printDebug(receptor.sequence_input, debug) printDebug(ris, debug) printDebug(receptor.sequence_imgt, debug) printDebug(\"RUNNING", "import os import random import subprocess import multiprocessing as mp from argparse import", "(share indexes with clones parameter). meta_data (str): Field to append to sequence IDs.", "are differences, e.g. if reconstruction was done before clonal clustering resolveglines = False", "= r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"] = \"NONFUNCTIONAL/PTC\" log[\"SEQ_IN\"] =", "+ clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer is not None: fail_writer.writeReceptor(clones[j]) else: useqs_f[conseq_f] = j", "of Receptor objects. meta_data (str): Field to append to sequence IDs. Splits identical", "(r) in IgPhyML. 
omega (str): omega optimization in IgPhyML (--omega) kappa (str): kappa", "scodons[i] = \"N\" + scodons[i][1:3] if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]:", "imgt (list) : IMGT numbering of clonal positions . \"\"\" # bootstrap these", "sites def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\",", "ptcs = hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence suddenly has a PTC..", "False logs[r.sequence_id][\"FAIL\"] = \"FWR/CDR error\" logs[r.sequence_id][\"FWRCDRSEQ\"] = simgt fails[\"seq_fail\"] += 1 fails[\"region_fail\"] +=", "\"...\": #if IMGT gap, move forward in imgt spos += 1 elif scodons[spos]", "INSERTION\" break else: receptor.sequence_input = ros receptor.sequence_imgt = ris return frameshifts def findAndMask(receptor,", "clones. clones (list): list of Receptor objects. meta_data (str): Field to append to", "in range(0, len(imgt)): if imgt[j] != 108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j])", "clean != \"none\": log = OrderedDict() log[\"START\"] = \"CLEANING\" log[\"SCOPE\"] = clean printLog(log)", "out_args, fail_writer, mask = not nmask) if total == sample_depth: break # Start", "mask split codons Arguments: scodons (list): list of codons in IMGT sequence. 
qi", "= r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\":\",\"-\") #remove", "tallies[i//3] > 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] = newgerm[-1]", "End clone processing printMessage(\"Done\", start_time=start_time, end=True, width=50) log_handle = None if out_args[\"log_file\"] is", "qpos += 1 elif qcodons[qpos] == \"N\": # possible that SEQ-IMGT ends on", "germ_id = [\"GERM\"] if meta_data is not None: for i in range(1,len(meta_data)): germ_id.append(\"GERM\")", "'<') printError(\"HLP tree building failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat", "(least ambiguous chars) joinseqs = {} # id -> useq to join with", "True, only estimate GY94 trees and parameters clean (str): delete intermediate files? (none,", "keys[i] skj = keys[j] else: ski, cid = keys[i].split(delim) skj, cid = keys[j].split(delim)", "topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args)) print('error>', e.output, '<')", "split codons for use with igphyml? Returns: str: modified IMGT gapped sequence. 
log:", "+ str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if", "e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str,", "\"Clone too small: \" + str(len(cloneseqs[str(k)])) logs[clones[str(k)][j].sequence_id][\"PASS\"] = False clonesizes[str(k)] = -len(cloneseqs[str(k)]) else:", "for r in all_records: if target_clones is None or r.clone in target_clones: if", "estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output", "motifs to use in IgPhyML (--motifs) hotness (str): motif in IgPhyML (--hotness) oformat", "r.sequence_imgt: log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"SEQ_IN\"] = r.sequence_input", "interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\",", "receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite + 3 + ins):] receptor.sequence_imgt =", "= list(useqs.keys()) join = {} # id -> sequence id to join with", "parameters (r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\", type=str, default=\"e,e\", choices", "1 masking succeeds \"\"\" if r.clone is None: printError(\"Cannot export datasets until sequences", "= estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\",", "3] for i in range(0, len(si), 3)] # deal with the fact that", "combination of 
topology (t) branch lengths (l) and parameters (r), or nothing (n),", "Arguments: receptor (changeo.Receptor.Receptor): Receptor object. oqpos (int) : position of interest in input", "in a sequence Arguments: sequence (str): IMGT gapped sequence in frame 1. Returns:", "for m in range(1,len(meta_data_ar)): st = c.getField(meta_data[0])+\":\"+c.getField(meta_data_ar[m]) c.setField(meta_data[0],st) if len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT", "clones\", start_time=start_time, width=50) for k in clones.keys(): if len(clones[str(k)]) < min_seq: for j", "Info __author__ = \"<NAME>\" from changeo import __version__, __date__ # Imports import os", "nseqs, delim, newgerm, conseqs, duplicate, imgt) if collapse: return len(useqs_f) else: return nseqs", "mask split codons for use with igphyml? Returns: str: modified IMGT gapped sequence.", "igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\", type=str, default=\"WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5\", help=\"\"\"Which motifs to estimate mutability.\"\"\") igphyml_group.add_argument(\"--hotness\", action=\"store\", dest=\"hotness\",", "k in clones.keys(): if len(clones[str(k)]) < min_seq: for j in range(0, len(clones[str(k)])): logs[clones[str(k)][j].sequence_id][\"FAIL\"]", "definition. Arguments: receptor (changeo.Receptor.Receptor): Receptor object. recursive (bool) : was this method part", "help=\"\"\"Number of threads to parallelize IgPhyML across.\"\"\") igphyml_group.add_argument(\"--clean\", action=\"store\", choices=(\"none\", \"all\"), dest=\"clean\", type=str,", "if mout[1][\"PASS\"]: #if debug: receptor.sequence_input = ros receptor.sequence_imgt = ris frameshifts += 1", "sequence Arguments: receptor (changeo.Receptor.Receptor): Receptor object. 
oqpos (int) : position of interest in", "group.add_argument(\"--ncdr3\", action=\"store_true\", dest=\"ncdr3\", help=\"\"\"If specified, remove CDR3 from all sequences.\"\"\") group.add_argument(\"--nmask\", action=\"store_true\", dest=\"nmask\",", "options try: reader, writer, __ = getFormatOperators(format) except ValueError: printError(\"Invalid format %s.\" %", "include in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\", help=\"\"\"List of", "kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons split", "clones, meta_data): \"\"\" Characterize potential mismatches between IMGT labels within a clone Arguments:", "printError(\"\\n\\nNo sequences found that match specified criteria.\",1) if sample_depth > 0: random.shuffle(big_enough) total", "newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] = newgerm[-1] + \"N\" elif", "across optimization (str): Optimize combination of topology (t) branch lengths (l) and parameters", "log[\"COLLAPSE\"] = collapse printLog(log) # Open output files out_label = \"lineages\" pass_handle =", "append to sequence ID to ensure uniqueness.\"\"\") igphyml_group = parser.add_argument_group(\"IgPhyML arguments (see igphyml", "= \"\".join(nseq) #print(\"Length: \" + str(ncdr3)) def characterizePartitionErrors(sequences, clones, meta_data): \"\"\" Characterize potential", "not the same length! %s %s\" % (seq1, seq2)) dist = 0 for", "compared pairwise, and all are zero # distance from the sequence they will", "format. out_args (dict): arguments for output preferences. 
Returns: dict: dictionary of output pass", "True if correctseqs: maxlen = sites maximgt = len(imgtar) for j in range(0,len(sequences)):", "line arguments and calls main \"\"\" # Parse command line arguments parser =", "for j in range(0, nseqs): conseq = \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if", "meta_data is not None: matches = 0 for m in meta_data: if ri.getField(m)", "ncdr3=False, nmask=False, sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization=\"lr\", omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\",", "is None: r.dupcount = 1 fails[\"rec_count\"] += 1 #printProgress(rec_count, rec_count, 0.05, start_time) ptcs", "md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\")\",\"-\") #remove parenthesis from metadata md", "or scodons[-1] == \".\": scodons[-1] = \"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\"", "= useqs[kj] joinseqs[ki] = kj else: ncj = 0 if useqs[kj] in join:", "of clonal lineage. conseqs (list) : consensus sequences. duplicate (bool) : duplicate sequence", "commas from sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id", "= md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\":\",\"-\") #remove colons from metadata", "%s\\n\" % r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if", "Receptor object. recursive (bool) : was this method part of a recursive call?", "(int) : starting position of IMGT sequence in input sequence. debug (bool) :", "list of IMGT positions for first sequence in clones, the germline sequence of", "position of IMGT sequence in input sequence. debug (bool) : print debugging statements.", "germline is \"\": germline = clones[0].getField(\"germline_imgt\") correctseqs = False for seqi in range(0,", "(imgtar, j)) #Resolve germline if there are differences, e.g. 
if reconstruction was done", "meta_data_list.append(str(clones[j].getField(meta_data[m]))) conseq_f = \"\".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+\":\".join(meta_data_list) else: conseq_f = conseq if", "outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile + \"_igphyml_stats_gy.txt\" gy_args = [\"igphyml\",", "qpos)) ospos=spos oqpos=qpos spos += 1 qpos += 1 while spos < s_end", "fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] = fails[\"germlineptc\"] log[\"OTHER_FAIL\"] = fails[\"other_fail\"]", "position of query sequence qi = qi[(receptor.v_seq_start - 1):] #tally where --- gaps", "to use when appending meta_data. Returns: list: deduplicated receptors within a clone. \"\"\"", "+= 1 return 0 if r.functional and ptcs < 0: #If IMGT regions", "(str): motif in IgPhyML (--hotness) oformat (str): output format for IgPhyML (tab or", "\"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"] =", "log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"] = fail_count log[\"NONFUNCTIONAL\"]", "OrderedDict() log[\"START\"] = \"IgPhyML HLP analysis\" log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"]", "adjacent codon was found, something\"s up. 
log[\"FAIL\"] = \"FAILED_MATCH_QSTRING:\"+str(spos) #figure out if this", "clonesizes[str(k)] fail_count = fails[\"rec_count\"] - pass_count # End clone processing printMessage(\"Done\", start_time=start_time, end=True,", "line = line.rstrip(\"\\n\") line = line.rstrip(\"\\r\") lsplit = line.split(\"\\t\") if len(lsplit) == 4:", "< ncountj: join[useqs[ki]] = useqs[kj] joinseqs[ki] = kj else: ncj = 0 if", "sequences are compared pairwise, and all are zero # distance from the sequence", "RegionDefinition from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs def correctMidCodonStart(scodons, qi, debug): \"\"\"", "-= 1 spos = ospos printDebug(\"But codon was apparently preserved\", debug) if \"IN-FRAME\"", "\"\"\" Run IgPhyML on outputted data Arguments: outfile (str): Output file name. igphymlout", "from collections import OrderedDict from textwrap import dedent from time import time from", "\"IgPhyML analysis\" printLog(log) # Note: Collapse can give misleading dupcount information if some", "of IMGT sequence in input sequence s_end (int): end of IMGT sequence qpos", "# Presto and changeo imports from presto.Defaults import default_out_args from presto.IO import printLog,", "\"\"\" # bootstrap these data if desired lg = len(newgerm) sites = range(0,", "= OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"] =", "do not attempt to mask split codons sample_depth (int): depth of subsampling before", "not divisible by three curgap = 0 for i in ndotgaps: if i", "if imgt[j] != 108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3", "if not nohlp: try: #estimate HLP parameters/trees p = subprocess.check_output(hlp_args) except subprocess.CalledProcessError as", "= ris return frameshifts def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug,", "[80] * len(regions[\"fwr3_imgt\"]) + \\ [108] * int(len(nseq)) 
r.setField(\"imgtpartlabels\", imgtpartlabels) if len(r.getField(\"imgtpartlabels\")) !=", "qi[0:3]), debug) if scodons[i] != \"...\": if scodons[i][0:2] == \"..\": scodons[i] = \"NN\"", "r.sequence_imgt logs[r.sequence_id] = log logs[r.sequence_id][\"PASS\"] = False logs[r.sequence_id][\"FAIL\"] = \"Germline PTC\" fails[\"seq_fail\"] +=", "% (imgtar, j)) #Resolve germline if there are differences, e.g. if reconstruction was", "if \"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"]", "if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]: qi = \"N\" + qi", "len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"]", "not the same among sequences in the same clone.\", \"Be sure to cluster", "(int) : position of interest in IMGT sequence. log (dict) : log of", "(dict): contains log information for each sequence. fails (dict): counts of various sequence", "printLog(log) # Note: Collapse can give misleading dupcount information if some sequences have", "len(c.getField(\"imgtpartlabels\")) != len(imgtar): printError(\"IMGT assignments are not the same within clone %d!\\n\" %", "#If clone is too small, size is returned as a negative if clonesizes[str(k)]", "writer object. min_seq (int): minimum number of data sequences to include. Returns: int:", "r.sequence_id) mout[1][\"PASS\"] = False mout[1][\"FAIL\"] = \"PTC_ADDED_FROM_MASKING\" logs[mout[1][\"ID\"]] = mout[1] if mout[1][\"PASS\"]: #passreads", "< s_end and qpos < len(qcodons): if debug: print(scodons[spos] + \"\\t\" + qcodons[qpos])", "!= qi[3:6]: qi = \"NN\" + qi spos = i break elif scodons[i][0]", "was this function called recursively? 
\"\"\" frameshifts = 0 while spos < s_end", "imgtpartlabels = [13] * len(regions[\"fwr1_imgt\"]) + [30] * len(regions[\"cdr1_imgt\"]) + [45] * len(", "action=\"store\", dest=\"asr\", type=float, default=-1, help=\"\"\"Ancestral sequence reconstruction interval (0-1).\"\"\") return parser if __name__", "mask split codons Arguments: receptor (changeo.Receptor.Receptor): Receptor object. scodons (list): list of codons", "gaps not divisible by three curgap = 0 for i in ndotgaps: if", "log[\"OPTIMIZE\"] = optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"]", "+ ins):] receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite + 3):] # Debug", "all intermediate files.\"\"\") igphyml_group.add_argument(\"--optimize\", action=\"store\", dest=\"optimization\", type=str, default=\"lr\", choices=(\"n\",\"r\",\"l\",\"lr\",\"tl\",\"tlr\"), help=\"\"\"Optimize combination of topology", "(str): IMGT gapped sequence in frame 1. Returns: int: negative if not PTCs,", "in range(0,len(si)): if si[i] == \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi", "\"_igphyml_CIlog.txt_hlp\" if os.path.isfile(cilog): os.remove(cilog) if oformat == \"tab\": os.rmdir(clone_dir) else: printWarning(\"Using --clean all", "import printLog, printMessage, printWarning, printError, printDebug from changeo.Defaults import default_format from changeo.IO import", "len(scodons[i]) == 3 and scodons[i] != \"NNN\": s_end = i printDebug(\"%i:%i:%s\" % (s_end,", "int: number of clones. 
\"\"\" s = \"\" delim = \"_\" duplicate =", "\"-o\", optimization, \"--omega\", omega, \"-t\", kappa, \"--motifs\", motifs, \"--hotness\", hotness, \"--oformat\", oformat, \"--outname\",", "= getOutputHandle(db_file, out_label=\"lineages-fail\", out_dir=out_args[\"out_dir\"], out_name=out_args[\"out_name\"], out_type=out_args[\"out_type\"]) fail_writer = writer(fail_handle, fields=out_fields) cloneseqs = {}", "log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open output", "(bool) : print debugging statements. \"\"\" frameshifts = 0 for ins in range(1,", "from the query, at which point #we have to shift the frame. This", "metadata to include in output fasta file sequence headers.\"\"\") group.add_argument(\"--clones\", nargs=\"+\", action=\"store\", dest=\"target_clones\",", "log = OrderedDict() log[\"ID\"] = r.sequence_id log[\"CLONE\"] = r.clone log[\"PASS\"] = False log[\"FAIL\"]", "if useqs[kj] in join: ncj = ambigchar[join[useqs[kj]]] if ncj < ncounti: join[useqs[kj]] =", "dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\", \"txt\"), help=\"\"\"IgPhyML output format.\"\"\") igphyml_group.add_argument(\"--nohlp\", action=\"store_true\", dest=\"nohlp\", help=\"\"\"Don't run", "m_match = (matches == len(meta_data)) if dist == 0 and m_match: ncounti =", "elif len(lcodon) == 1: newgerm[-1] = newgerm[-1] + \"NN\" if ncdr3: ngerm =", "igphyml executable subprocess.check_output([\"igphyml\"]) except: printError(\"igphyml not found :-/\") try: #get GY94 starting topologies", "IMGT FWR/CDR sequence columns not detected.\\n! 
Cannot run CDR/FWR partitioned model on this", "nseqs = characterizePartitionErrors(sequences, clones, meta_data) tallies = [] for i in range(0, sites,", "omega=\"e,e\", kappa=\"e\", motifs=\"FCH\", hotness=\"e,e,e,e,e,e\", oformat=\"tab\", clean=\"none\", nohlp=False, asr=-1, format=default_format, out_args=default_out_args): \"\"\" Masks codons", "Splits identical sequences with different meta_data. meta_data (str): Field to append to sequence", "printError(\"%s %s\" % (\"Predicted germlines are not the same among sequences in the", "range(0, len(scodons)): printDebug(\"%s %s\" % (scodons[i], qi[0:3]), debug) if scodons[i] != \"...\": if", "+ [80]*len(regions[\"fwr3_imgt\"]) + [108] * len(regions[\"cdr3_imgt\"]) + \\ [120] * len(regions[\"fwr4_imgt\"]) r.setField(\"imgtpartlabels\", imgtpartlabels)", "printDebug(\"Masked %s at position %d, at end of subject sequence\" % (scodons[ospos], ospos),", "range(0, lg) transtable = clones[0].sequence_id.maketrans(\" \", \"_\") outfile = os.path.join(out_dir, \"%s.fasta\" % clones[0].clone)", "args = parser.parse_args() args_dict = parseCommonArgs(args) del args_dict[\"db_files\"] # Call main for each", "errors. meta_data (str): Field to append to sequence IDs. Splits identical sequences with", "0: newseqs.append([]) if tallies[i//3] > 0: newseqs[j].append(sequences[j][i:(i+3)]) lcodon = \"\" for i in", "loop through list of joined sequences and collapse keys = list(useqs.keys()) for k", "are clustered into clones.\") if r.dupcount is None: r.dupcount = 1 fails[\"rec_count\"] +=", "* (seqdiff) if sites % 3 != 0: printError(\"number of sites must be", "(str) : input sequence. 
spos (int) : starting position of IMGT sequence in", "in range(spos, len(scodons)): if scodons[i] != \"...\" and len(scodons[i]) == 3 and scodons[i]", "+ \"\\t\" + str(clonesizes[key])) outfile = os.path.join(clone_dir, \"%s.fasta\" % key) partfile = os.path.join(clone_dir,", "fails, out_args, fail_writer, mask = not nmask) if total == sample_depth: break #", "qi[3:6]: qi = \"NN\" + qi spos = i break elif scodons[i][0] ==", "md = md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if append is not None:", "seq2, fbreak=False): \"\"\" Calculate the distance between two sequences counting only A,T,C,Gs Arguments:", "hasPTC(mask_seq) if ptcs >= 0: printWarning(\"Masked sequence suddenly has a PTC.. %s\\n\" %", "clone append (str): column name to append to sequence_id igphyml (bool): If True,", "ambigchar[useqs[ki]] = ncounti ambigchar[useqs[kj]] = ncountj # this algorithm depends on the fact", "len(simgt))) simgt = regions[\"fwr1_imgt\"] + regions[\"cdr1_imgt\"] + regions[\"fwr2_imgt\"] + \\ regions[\"cdr2_imgt\"] + regions[\"fwr3_imgt\"]", "False qi = receptor.sequence_input si = receptor.sequence_imgt log = OrderedDict() log[\"ID\"]=receptor.sequence_id log[\"CLONE\"]=receptor.clone log[\"PASS\"]", "str(delim.join(meta_data_list)) sid = clones[j].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), conseqs[j].replace(\".\", \"-\"))) if nseqs", "branch lengths (l) and parameters (r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\",", "= 0 for i in ndotgaps: if i == 1: curgap += 1", "(int): end of IMGT sequence qpos (int): starting position of input sequence in", "and then predict germlines using --cloned\")) if sites > (len(germline)): seqdiff = sites", "records failed processing. 
igphyml-pass parameter estimates and lineage trees from running IgPhyML, if", "\"minseq_fail\":0, \"other_fail\":0, \"region_fail\":0, \"germlineptc\":0, \"fdcount\":0, \"totalreads\":0, \"passreads\":0, \"failreads\":0} # Mask codons split by", "!= 108: nseq.append(newseqs[i][j]) if i == 0: ngerm.append(newgerm[j]) nimgt.append(imgt[j]) else: ncdr3 += 1", "= {\"pass\": None, \"fail\": None} if pass_handle is not None: output[\"pass\"] = pass_handle.name", "debug) elif spos >= s_end or qcodons[qpos] != scodons[spos]: scodons[ospos] = \"NNN\" if", "or germline_alignment, v_call, j_call, clone_id, v_sequence_start \"\"\") # Parent parser parser_parent = getCommonArgParser(out_file=False,", "(modified input sequence, modified starting position of IMGT sequence in input sequence). \"\"\"", "False) printError(\"%s\\n%d\\n\" % (imgtar, j)) #Resolve germline if there are differences, e.g. if", "#get GY94 starting topologies p = subprocess.check_output(gy_args) except subprocess.CalledProcessError as e: print(\" \".join(gy_args))", "ospos), debug) scodons[ospos] = \"NNN\" if \"MASKED\" in log: log[\"MASKED\"] = log[\"MASKED\"] +", "if meta_data is not None: meta_data_ar = meta_data[0].split(\",\") for c in clones: if", "logs[clones[j].sequence_id][\"PASS\"] = False logs[clones[j].sequence_id][\"FAIL\"] = \"Duplication of \" + clones[useqs_f[conseq_f]].sequence_id logs[clones[j].sequence_id][\"DUPLICATE\"]=True if fail_writer", "= \"\".join([str(seq_rec) for seq_rec in newseqs[j]]) if meta_data is not None: meta_data_list =", "multiprocessing as mp from argparse import ArgumentParser from collections import OrderedDict from textwrap", "-len(useqs_f) elif not collapse and len(conseqs) < min_seq: for j in range(0, nseqs):", "maxlen for j in range(0,len(sequences)): cimgt = clones[j].getField(\"imgtpartlabels\") seqdiff = maxlen - len(sequences[j])", "== \"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi + si[i] if si[i]", "log = OrderedDict() 
log[\"START\"] = \"IgPhyML GY94 tree estimation\" printLog(log) try: #check for", "== 1 and duplicate: if meta_data is not None: if meta_data[0] == \"DUPCOUNT\":", "at the next codon over in the #alignment if scodons[i][2:3] != qi[2:3] or", "scodons[-1] = \"...\" else: scodons[-1] = \"NNN\" if \"END-MASKED\" in log: log[\"END-MASKED\"] =", "else: log[\"END-MASKED\"] = str(spos) else: printDebug(\"Masked %s at position %d, but couldn't find", "lengths (l) and parameters (r), or nothing (n), for IgPhyML.\"\"\") igphyml_group.add_argument(\"--omega\", action=\"store\", dest=\"omega\",", "#printProgress(rec_count, rec_count, 0.05, start_time) log = OrderedDict() log[\"OUTPUT\"] = os.path.basename(pass_handle.name) if pass_handle is", "!= \".\": if seq1[i] != seq2[i]: dist += 1 if fbreak: break return", "the length of the first sequence in clones, and the number of sequences", "log = OrderedDict() log[\"START\"] = \"BuildTrees\" log[\"FILE\"] = os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log)", "gy_args = [\"igphyml\", \"--repfile\", outfile, \"-m\", \"GY\", \"--run_id\", \"gy\", \"--outrep\", outrep, \"--threads\", str(nproc),\"--outname\",gyout]", "estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--oformat\", action=\"store\", dest=\"oformat\", type=str, default=\"tab\", choices=(\"tab\",", "debug) if \"IN-FRAME\" in log: log[\"IN-FRAME\"] = log[\"IN-FRAME\"] + \",\" + str(spos) else:", "parser.add_argument_group(\"sequence processing arguments\") group.add_argument(\"--collapse\", action=\"store_true\", dest=\"collapse\", help=\"\"\"If specified, collapse identical sequences before exporting", "None: log_handle = open(out_args[\"log_file\"], \"w\") for j in logs.keys(): printLog(logs[j], handle=log_handle) pass_handle.write(str(nclones)+\"\\n\") for", "all are zero # distance from the sequence they will be collapse to.", "= os.path.basename(db_file) log[\"COLLAPSE\"] = collapse printLog(log) # Open output files 
out_label = \"lineages\"", "> 0: newgerm.append(germline[i:(i+3)]) lcodon=germline[i:(i+3)] imgt.append(imgtar[i]) if len(lcodon) == 2: newgerm[-1] = newgerm[-1] +", "1 return 0 elif regions[\"fwr3_imgt\"] is not \"\" and regions[\"fwr3_imgt\"] is not None:", "r.getField(meta_data[m]) md = md.replace(\",\",\"-\") #remove commas from metadata md = md.replace(\":\",\"-\") #remove colons", "return 0 else: #imgt_warn = \"\\n! IMGT FWR/CDR sequence columns not detected.\\n! Cannot", "this data.\\n\" imgtpartlabels = [0] * len(r.sequence_imgt) r.setField(\"imgtpartlabels\", imgtpartlabels) mout = maskSplitCodons(r, mask=mask)", "ncon_seq + concatenated_seq[counter:] concatenated_seq = ncon_seq log[\"SEQ_IN\"] = receptor.sequence_input log[\"SEQ_IMGT\"] = receptor.sequence_imgt log[\"SEQ_MASKED\"]", "md.replace(\"(\",\"-\") #remove parenthesis from metadata r.setField(meta_data[m],md) if append is not None: if append", "the same clone.\", \"Be sure to cluster sequences into clones first and then", "delim + cid.replace(\":\", \"_\") sid = clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\",", "rto.sequence_id del useqs[k] return useqs def hasPTC(sequence): \"\"\" Determines whether a PTC exits", "Bio.Seq import Seq from functools import partial # Presto and changeo imports from", "IgPhyML on outputted data? 
if igphyml: runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc, optimization=optimization, omega=omega, kappa=kappa,", "ki.count(\"A\") + ki.count(\"T\") + ki.count(\"G\") + ki.count(\"C\") ncountj = kj.count(\"A\") + kj.count(\"T\") +", "ndotgaps = [] nsi = \"\" for i in range(0,len(si)): if si[i] ==", "return -len(useqs_f) elif not collapse and len(conseqs) < min_seq: for j in range(0,", "clone_name = out_args[\"out_name\"] if dir_name is None: clone_dir = clone_name else: clone_dir =", "log[\"MASKED\"] = log[\"MASKED\"] + \",\" + str(spos) else: log[\"MASKED\"] = str(spos) else: log[\"PASS\"]", "printError(\"ChangeO file needs to be corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j] !=", "= fails[\"del_fail\"] log[\"FRAMESHIFT_INS\"] = fails[\"in_fail\"] log[\"CLONETOOSMALL\"] = fails[\"minseq_fail\"] log[\"CDRFWR_ERROR\"] = fails[\"region_fail\"] log[\"GERMLINE_PTC\"] =", "+= 1 qpos += 1 elif qcodons[qpos] == \"N\": # possible that SEQ-IMGT", "qpos >= len(qcodons) and spos < s_end: printDebug(\"FAILING MATCH\", debug) log[\"PASS\"] = False", "maps sequence to index in Receptor list. receptors (dict): receptors within a clone", "useqs[kj] joinseqs[ki] = kj else: ncj = 0 if useqs[kj] in join: ncj", "lineage. conseqs (list) : consensus sequences. 
duplicate (bool) : duplicate sequence if only", "all) \"\"\" osplit = outfile.split(\".\") outrep = \".\".join(osplit[0:(len(osplit)-1)]) + \"_gy.tsv\" gyout = outfile", "= True r.sequence_id = r.sequence_id.replace(\",\",\"-\") #remove commas from sequence ID r.sequence_id = r.sequence_id.replace(\":\",\"-\")", "\"-\": gaps.append(1) ndotgaps.append(1) else: gaps.append(0) nsi = nsi + si[i] if si[i] !=", "regions[\"fwr4_imgt\"] if len(simgt) < len(r.sequence_imgt): r.fwr4_imgt = r.fwr4_imgt + (\".\"*(len(r.sequence_imgt) - len(simgt))) simgt", "+ \"N\"*(seqdiff) last = cimgt[-1] cimgt.extend([last]*(imgtdiff)) clones[j].setField(\"imgtpartlabels\",cimgt) if meta_data is not None: meta_data_ar", "sites or len(clones[seqi].getField(\"imgtpartlabels\")) != len(imgtar): correctseqs = True if correctseqs: maxlen = sites", "sequence[i:(i+3)] in ptcs: return i return -1 def rmCDR3(sequences, clones): \"\"\" Remove CDR3", "parenthesis from sequence ID if(meta_data is not None): for m in range(0,len(meta_data)): md", "of a recursive call? mask (bool) : mask split codons for use with", "in the #alignment if scodons[i][2:3] != qi[2:3] or scodons[i + 1] != qi[3:6]:", "(sequences[j],clones[j].getField(\"imgtpartlabels\")),False) printError(\"ChangeO file needs to be corrected\") for j in range(0,len(imgtar)): if c.getField(\"imgtpartlabels\")[j]", "action=\"store\", dest=\"hotness\", type=str, default=\"e,e,e,e,e,e\", help=\"\"\"Mutability parameters to estimate: e = estimate, ce =", "None else None log[\"RECORDS\"] = fails[\"totalreads\"] log[\"INITIAL_FILTER\"] = fails[\"rec_count\"] log[\"PASS\"] = pass_count log[\"FAIL\"]", "and calls main \"\"\" # Parse command line arguments parser = getArgParser() checkArgs(parser)", "oformat (str): output format for IgPhyML (tab or txt) nohlp (bool): If True,", "printDebug(\"Frame-shifting gap detected! 
Refusing to include sequence.\", debug) log[\"PASS\"] = False log[\"FAIL\"] =", "\"END-MASKED\" in log: log[\"END-MASKED\"] = log[\"END-MASKED\"] + \",\" + str(spos) else: log[\"END-MASKED\"] =", "{} #sequence id -> number ATCG nucleotides for i in range(0,len(keys)-1): for j", "min_seq: big_enough.append(r) fails[\"totalreads\"] = len(all_records) #fails[\"minseq_fail\"] = len(all_records) - len(big_enough) if len(big_enough) ==", "len(germline) germline = germline + \"N\" * (seqdiff) if sites % 3 !=", "= clones[num].sequence_id.translate(transtable) + cid clonef.write(\">%s\\n%s\\n\" % (sid.replace(\":\",\"-\"), seq.replace(\".\", \"-\"))) if len(useqs_f) == 1", "[] found_no_funct = False for r in records: if r.functional is None: r.functional", "= optimization log[\"TS/TV\"] = kappa log[\"wFWR,wCDR\"] = omega log[\"MOTIFS\"] = motifs log[\"HOTNESS\"] =", "\".\": ndotgaps.append(0) #find any gaps not divisible by three curgap = 0 for", "sequence ID r.sequence_id = r.sequence_id.replace(\")\",\"-\") #remove parenthesis from sequence ID r.sequence_id = r.sequence_id.replace(\"(\",\"-\")", "estimate: e = estimate, ce = estimate + confidence interval\"\"\") igphyml_group.add_argument(\"--motifs\", action=\"store\", dest=\"motifs\",", "tree building failed\") log = OrderedDict() log[\"OUTPUT\"] = igphyml_out if oformat == \"tab\":", "assignments are not the same within clone %d!\\n\" % c.clone, False) printError(c.getField(\"imgtpartlabels\"), False)", "import dedent from time import time from Bio.Seq import Seq from functools import", "ngermline = c.getField(\"germline_imgt\") if ngermline != germline: resolveglines = True if resolveglines: printError(\"%s" ]
[ "Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions #", "import WubaItem from selenium import webdriver from lxml import etree from selenium.webdriver.chrome.options import", "item['gongci'] = gongci item['yueli'] = yueli item['name'] = name yield item #进行分页操作 if", "bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains =", "'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response): li_list =", "selenium.webdriver import ChromeOptions # 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe',", "= WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url =", "import scrapy from wuba.items import WubaItem from selenium import webdriver from lxml import", "WubaItem from selenium import webdriver from lxml import etree from selenium.webdriver.chrome.options import Options", "解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url =", "#设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response): li_list = 
response.xpath('//ul[@id=\"list_con\"]/li')", "li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item = WubaItem() # 解析工作岗位", "from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver", "#解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url", "item = WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url", "url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for", "import ChromeOptions # 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options,", "'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in", "import Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions", "WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url)", "li in li_list: #实例化一个item item = WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url", "#实例化一个item item = WubaItem() # 解析工作岗位 name = 
li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract()", "= str(deta_url) item['new_url'] = new_url bot.get(new_url) page = bot.page_source tree = etree.HTML(page) #解析工资", "= li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url) page", "yueli item['name'] = name yield item #进行分页操作 if self.page_num <= 5: num_url =", "# 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class", "new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url) page = bot.page_source tree = etree.HTML(page)", "bot.page_source tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli", "tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] =", "selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import", "ChromeOptions # 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option)", "#解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2]", "# allowed_domains = ['www.xxx.com'] start_urls = [ 
'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num", "bot.get(new_url) page = bot.page_source tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci =", "无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测 option", "#提交item item['gongci'] = gongci item['yueli'] = yueli item['name'] = name yield item #进行分页操作", "= webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains = ['www.xxx.com']", "def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item =", "''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url) page = bot.page_source tree =", "start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self,", "class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4']", "= 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls = [ 
'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url =", "= ''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url) page = bot.page_source tree", "webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls", "= 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li", "tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] = yueli item['name'] = name", "yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] = yueli item['name']", "wuba.items import WubaItem from selenium import webdriver from lxml import etree from selenium.webdriver.chrome.options", "etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] #", "= Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测 option = ChromeOptions()", "str(deta_url) item['new_url'] = new_url bot.get(new_url) page = bot.page_source tree = etree.HTML(page) #解析工资 gongci", "= etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli = 
tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0]", "item['new_url'] = new_url bot.get(new_url) page = bot.page_source tree = etree.HTML(page) #解析工资 gongci =", "parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item = WubaItem()", "gongci item['yueli'] = yueli item['name'] = name yield item #进行分页操作 if self.page_num <=", "= [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response):", "['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains", "= bot.page_source tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历", "gongci = ''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci", "= 2 def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item", "#解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] = yueli", "= response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item = WubaItem() # 解析工作岗位 name", "li_list: #实例化一个item item = WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url =", "= tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = 
''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci']", "= gongci item['yueli'] = yueli item['name'] = name yield item #进行分页操作 if self.page_num", "= name yield item #进行分页操作 if self.page_num <= 5: num_url = format(self.url%self.page_num) self.page_num+=1", "chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot", "tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli =", "chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls =", "option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name", "import etree from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu')", "= tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] = yueli item['name'] =", "from selenium import webdriver from lxml import etree from selenium.webdriver.chrome.options import Options #", "import webdriver from lxml import etree from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options", "= ''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli']", "''.join(gongci) #解析学历 yueli = 
tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] =", "option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' #", "2 def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item", "Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板", "= new_url bot.get(new_url) page = bot.page_source tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()')", "new_url bot.get(new_url) page = bot.page_source tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci", "from selenium.webdriver import ChromeOptions # 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot =", "item['name'] = name yield item #进行分页操作 if self.page_num <= 5: num_url = format(self.url%self.page_num)", "= ['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2", "chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测 option = ChromeOptions() 
option.add_experimental_option('excludeSwitches', ['enable-automation'])", "chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测 option =", "= li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url']", "in li_list: #实例化一个item item = WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url", "deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url) page = bot.page_source", "ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1'", "['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def", "options=option) class Wuba1Spider(scrapy.Spider): name = 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls = [", "scrapy from wuba.items import WubaItem from selenium import webdriver from lxml import etree", "name = 'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url", "page = bot.page_source tree = etree.HTML(page) #解析工资 gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci)", "yield item 
#进行分页操作 if self.page_num <= 5: num_url = format(self.url%self.page_num) self.page_num+=1 yield scrapy.Request(url=num_url,callback=self.parse)", "# 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测", "li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url) page =", "selenium import webdriver from lxml import etree from selenium.webdriver.chrome.options import Options # 无头浏览器", "li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url'] =", "deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url) item['new_url'] = new_url bot.get(new_url)", "gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()') gongci = ''.join(gongci) #解析学历 yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0] # /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item", "name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url = str(deta_url)", "规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider):", "webdriver from lxml import etree from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options =", "'wuba_1' # allowed_domains = ['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 
'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3'", "# 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract() #解析详情页url deta_url = li.xpath('./div/div/a/@href')[0].extract() deta_url = ''.join(deta_url) new_url", "lxml import etree from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless')", "item['yueli'] = yueli item['name'] = name yield item #进行分页操作 if self.page_num <= 5:", "Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from selenium.webdriver import ChromeOptions # 规避检测 option = ChromeOptions() option.add_experimental_option('excludeSwitches',", "= yueli item['name'] = name yield item #进行分页操作 if self.page_num <= 5: num_url", "from wuba.items import WubaItem from selenium import webdriver from lxml import etree from", "from lxml import etree from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options = Options()", "= ChromeOptions() option.add_experimental_option('excludeSwitches', ['enable-automation']) bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option) class Wuba1Spider(scrapy.Spider): name =", "response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item = WubaItem() # 解析工作岗位 name =", "etree from selenium.webdriver.chrome.options import Options # 无头浏览器 chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') from", "# /html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] = yueli item['name'] = name yield", "allowed_domains = ['www.xxx.com'] start_urls = [ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] 
#设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num =", "/html/body/div[3]/div[3]/div[1]/div[4]/span[2] #提交item item['gongci'] = gongci item['yueli'] = yueli item['name'] = name yield item", "for li in li_list: #实例化一个item item = WubaItem() # 解析工作岗位 name = li.xpath('./div/div/a/span[2]/text()')[0].extract()", "name yield item #进行分页操作 if self.page_num <= 5: num_url = format(self.url%self.page_num) self.page_num+=1 yield", "page_num = 2 def parse(self, response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list:", "[ 'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4'] #设置通用url模板 url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3' page_num = 2 def parse(self, response): li_list", "response): li_list = response.xpath('//ul[@id=\"list_con\"]/li') for li in li_list: #实例化一个item item = WubaItem() #" ]
[ "e in all_enums if e.name == \"AttribMask\") clbuf_mask = next(e for e in", "next(e for e in all_enums if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert (", "\"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v for v in clbuf_mask.values if v.name", "xml.Element): for node in spec: if node.tag == \"enums\": yield node def _collect_features(spec:", "as xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi,", "xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e", "node.tag == \"feature\": yield node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec))", "gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0))", "\"0xFFFFFFFF\" ) assert ( next(v for v in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value", "( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec:", "for v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert (", "test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for", "== \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v for v in", "_collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates", "v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( 
next(v for v in attrib_mask.values", "\"\"\"Test enum definition parsing.\"\"\" import xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums from", "in all_enums if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v for v", "in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( next(v for", "attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( next(v for v", "xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion,", "for node in spec: if node.tag == \"enums\": yield node def _collect_features(spec: xml.Element):", "get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element):", "def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e", "yield node def _collect_features(spec: xml.Element): for node in spec: if node.tag == \"feature\":", "def _collect_features(spec: xml.Element): for node in spec: if node.tag == \"feature\": yield node", "if e.name == \"AttribMask\") clbuf_mask = next(e for e in all_enums if e.name", "\"AttribMask\") clbuf_mask = next(e for e in all_enums if e.name == \"ClearBufferMask\") assert", "v in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert", "node.tag == \"enums\": yield node def _collect_features(spec: xml.Element): for node in spec: if", "\"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v for v in clbuf_mask.values", "next(v for v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert", "assert ( next(v for v 
in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\"", "from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1,", "attrib_mask.is_bitmask assert ( next(v for v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value ==", "candidates)) attrib_mask = next(e for e in all_enums if e.name == \"AttribMask\") clbuf_mask", "TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node in spec: if", "assert clbuf_mask.is_bitmask assert ( next(v for v in clbuf_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value", "assert ( next(v for v in clbuf_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\"", "for node in spec: if node.tag == \"feature\": yield node def _collect_required(spec: xml.Element):", "in spec: if node.tag == \"enums\": yield node def _collect_features(spec: xml.Element): for node", "return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec))", "if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( next(v for v in", "in all_enums if e.name == \"AttribMask\") clbuf_mask = next(e for e in all_enums", "enum definition parsing.\"\"\" import xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature", "in spec: if node.tag == \"feature\": yield node def _collect_required(spec: xml.Element): return tuple(", "\"feature\": yield node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() )", "( next(v for v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" )", "== \"0x00000001\" ) assert ( next(v for v in 
attrib_mask.values if v.name ==", "v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( next(v", "FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node in", "( next(v for v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" )", "if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v for", "all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e in all_enums if e.name", ") def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask =", "for v in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask", "version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node in spec: if node.tag == \"enums\":", "e.name == \"AttribMask\") clbuf_mask = next(e for e in all_enums if e.name ==", "= next(e for e in all_enums if e.name == \"AttribMask\") clbuf_mask = next(e", "( next(v for v in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" )", "node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec:", "get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums =", "( next(v for v in clbuf_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" )", "v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v for v", "all_enums if e.name == \"ClearBufferMask\") assert 
attrib_mask.is_bitmask assert ( next(v for v in", "\"enums\": yield node def _collect_features(spec: xml.Element): for node in spec: if node.tag ==", "parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL,", "minor=0)) def _get_enum_nodes(spec: xml.Element): for node in spec: if node.tag == \"enums\": yield", "attrib_mask = next(e for e in all_enums if e.name == \"AttribMask\") clbuf_mask =", "def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element):", "in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v for", "attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v for v", "clbuf_mask.is_bitmask assert ( next(v for v in clbuf_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value ==", "parsing.\"\"\" import xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import (", ") assert ( next(v for v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value ==", "\"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( next(v for v in attrib_mask.values if v.name", "\"0x00000001\" ) assert ( next(v for v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value", "= tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e in all_enums", "node in spec: if node.tag == \"feature\": yield node def _collect_required(spec: xml.Element): return", "= tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e in all_enums if e.name ==", "== \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert ( next(v for v in attrib_mask.values if", "_get_enum_nodes(spec: xml.Element): for 
node in spec: if node.tag == \"enums\": yield node def", "assert ( next(v for v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\"", "in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert (", "_collect_features(spec: xml.Element): for node in spec: if node.tag == \"feature\": yield node def", "FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node", "v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v for v in attrib_mask.values", "TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec),", ").enums.keys() ) def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask", "== \"0xFFFFFFFF\" ) assert ( next(v for v in attrib_mask.values if v.name ==", "import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE =", "== \"enums\": yield node def _collect_features(spec: xml.Element): for node in spec: if node.tag", ") assert clbuf_mask.is_bitmask assert ( next(v for v in clbuf_mask.values if v.name ==", "<gh_stars>0 \"\"\"Test enum definition parsing.\"\"\" import xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums", "for e in all_enums if e.name == \"AttribMask\") clbuf_mask = next(e for e", "all_enums if e.name == \"AttribMask\") clbuf_mask = next(e for e in all_enums if", "next(e for e in all_enums if e.name == \"AttribMask\") clbuf_mask = next(e for", "node in spec: if node.tag == \"enums\": yield node def _collect_features(spec: xml.Element): for", 
"Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node in spec: if node.tag ==", "spec: if node.tag == \"enums\": yield node def _collect_features(spec: xml.Element): for node in", "gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE", "definition parsing.\"\"\" import xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import", "spec: if node.tag == \"feature\": yield node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements(", "== \"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v for v in attrib_mask.values if v.name", "== \"feature\": yield node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys()", "import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def", "next(v for v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert", "= Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node in spec: if node.tag", "xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates =", "if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v for v in attrib_mask.values", "import xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements,", "for v in attrib_mask.values if v.name == \"GL_ALL_ATTRIB_BITS\").value == \"0xFFFFFFFF\" ) assert (", "node def _collect_features(spec: xml.Element): 
for node in spec: if node.tag == \"feature\": yield", "tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e in all_enums if e.name == \"AttribMask\")", "Feature, FeatureApi, FeatureVersion, ) TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for", "for e in all_enums if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v", "if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v for v in", "if node.tag == \"enums\": yield node def _collect_features(spec: xml.Element): for node in spec:", "== \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v for v in attrib_mask.values if", "from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature, FeatureApi, FeatureVersion, )", "xml.etree.ElementTree as xml from gladiator.parse.enum import parse_required_enums from gladiator.parse.feature import ( get_feature_requirements, Feature,", "== \"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v for v in clbuf_mask.values if", "xml.Element): for node in spec: if node.tag == \"feature\": yield node def _collect_required(spec:", "clbuf_mask = next(e for e in all_enums if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask", "\"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v for v in attrib_mask.values if v.name ==", "\"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v for v in attrib_mask.values if v.name", "assert attrib_mask.is_bitmask assert ( next(v for v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value", "assert ( next(v for v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\"", "tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e in all_enums if", ") assert ( next(v for v in 
attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value ==", "next(v for v in attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert", "= next(e for e in all_enums if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert", "yield node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def", "tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates))", "candidates = tuple(_get_enum_nodes(spec)) all_enums = tuple(parse_required_enums(_collect_required(spec), candidates)) attrib_mask = next(e for e in", "attrib_mask.values if v.name == \"GL_DEPTH_BUFFER_BIT\").value == \"0x00000100\" ) assert clbuf_mask.is_bitmask assert ( next(v", ") TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0)) def _get_enum_nodes(spec: xml.Element): for node in spec:", "== \"AttribMask\") clbuf_mask = next(e for e in all_enums if e.name == \"ClearBufferMask\")", "if node.tag == \"feature\": yield node def _collect_required(spec: xml.Element): return tuple( get_feature_requirements( TESTED_FEATURE,", "tuple( get_feature_requirements( TESTED_FEATURE, tuple(_collect_features(spec)) ).enums.keys() ) def test_parse_enums(spec: xml.Element): candidates = tuple(_get_enum_nodes(spec)) all_enums", "v in attrib_mask.values if v.name == \"GL_CURRENT_BIT\").value == \"0x00000001\" ) assert ( next(v", "e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v for v in attrib_mask.values if", "e in all_enums if e.name == \"ClearBufferMask\") assert attrib_mask.is_bitmask assert ( next(v for", "def _get_enum_nodes(spec: xml.Element): for node in spec: if node.tag == \"enums\": yield node" ]
[ "- coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy", "start: # # simRemoteApi.start(19999) # # then start simulation, and run this program.", "coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) +", "th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float)", "sleep_time = 0.07): for i in path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten())", "l = np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start')", "1 coef['b1'] = dy #coef. livre coef['b2'] = 0 #coef. livre coef['a0'] =", "yi coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx", "pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y =", "(v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time", "coef['b2'] elif thi_test: print('Caso Especial #2') #caso especial 2 alpha_f = np.tan(thf) coef['a3']", "and (thf_test): print('Caso Especial #1') # caso especial 1 coef['b1'] = dy #coef.", "in the same folder as this file,') print ('or appropriately adjust the file", "xi dy = yf - yi coef = dict() thi_test = (np.pi/2.0 -", "especial 2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3'] = 0", "0 coef['a2'] = dx - coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy -", "# should be a corresponding call to simxFinish at the end! try: import", "to simxFinish at the end! try: import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\"", "np.tan(thf) coef['a1'] = dx #coef. livre coef['a2'] = 0 #coef. 
livre coef['a0'] =", "print('Caso Especial #2') #caso especial 2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef.", "0) and (resP != 0) and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle,", "except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not be imported. This means very probably", "to have the server side running in CoppeliaSim: # in a child script", "= xi coef['a1'] = 0 coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0'] =", "0 coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0'] = yi coef['b3'] = dy", "3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] = 0 #coef.", "+ 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator", "simulator!') pass time.sleep(sleep_time) # xi, yi, thi => ponto e orientação inicial #", "geral alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx #coef. livre coef['a2']", "= 0 coef['a2'] = dx - coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy", "thi => ponto e orientação inicial # xf, yf, thf => ponto e", "= np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0],", "coef def pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3", "motors and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if", "same folder as this file,') print ('or appropriately adjust the file \"sim.py\"') print", "dy #coef. livre coef['b2'] = 0 #coef. livre coef['a0'] = xi coef['a1'] =", "= np.tan(thf) coef['a1'] = dx #coef. livre coef['a2'] = 0 #coef. 
livre coef['a0']", "######################################## simulation ####################################### # Before closing the connection to CoppeliaSim, make sure that", "returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: # print('Error: fail", "(np.pi/2.0 + delta) if (thi_test) and (thf_test): print('Caso Especial #1') # caso especial", "# # simRemoteApi.start(19999) # # then start simulation, and run this program. #", "coef['b3'] = 0 #coef. livre (qualquer valor aqui) coef['a0'] = xi coef['a1'] =", "= 0.331 #wheel axis distance r_w = 0.09751 #wheel radius def pioneer_robot_model(v, omega):", "orientação inicial # xf, yf, thf => ponto e orientação final # coef", "label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration #######################################", "print('ERRO: Falha em GetObjectHandle para o Pioneer') exit() if returnT != 0: print('ERRO:", "while (resA != 0) and (resP != 0) and (resT != 0): resA,ang", "# IMPORTANT: for each successful call to simxStart, there # should be a", "coef['a2'] = dx - coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy - alpha_f*dx)", "coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral')", "child script of a CoppeliaSim scene, add following command # to be executed", "'-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration", "< thf < (np.pi/2.0 + delta) if (thi_test) and (thf_test): print('Caso Especial #1')", "= 0 #coef. 
livre coef['a0'] = xi coef['a1'] = 0 coef['a2'] = 3*dx", "CoppeliaSim: # in a child script of a CoppeliaSim scene, add following command", "(clientID != 0): print('Falha na conexão') exit() print('Conectado!') # handlers of motors and", "and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1,", "orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol. de", "elif thf_test: print('Caso Especial #3') #caso especial 3 alpha_i = np.tan(thi) coef['a1'] =", "in a child script of a CoppeliaSim scene, add following command # to", "a CoppeliaSim scene, add following command # to be executed just once, at", "end! try: import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not be imported.", "omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07): for i in path[:,0:2]: point2send =", "= np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx #coef. livre coef['a2'] = 0", "sure that the last command sent out had time to arrive. You can", "import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not be imported. This means", "- coef['b2'] else: print('Caso Geral') #caso geral alpha_i = np.tan(thi) alpha_f = np.tan(thf)", "= np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] = 0 #coef. livre (qualquer", "0) and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos =", "#coef. livre coef['b2'] = 0 #coef. livre (qualquer valor aqui) coef['a0'] = xi", "alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] -", "coef['b3'] = dy - coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial #2') #caso", "#coef. livre coef['a2'] = 0 #coef. 
livre coef['a0'] = xi coef['a3'] = dx", "path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True)", "coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0'] = yi coef['b3'] = dy -", "if returnCode != 0: # print('Error: fail to send the path point to", "variables ############################################### time.sleep(0.5) resA,resP = 1, 1 resT = 1 while (resA !=", "- yi coef = dict() thi_test = (np.pi/2.0 - delta) < thi <", "returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha em GetObjectHandle para", "script of a CoppeliaSim scene, add following command # to be executed just", "= dict() thi_test = (np.pi/2.0 - delta) < thi < (np.pi/2.0 + delta)", "Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP = 1, 1 resT = 1", "!= 0: print('ERRO: Falha em GetObjectHandle para o Pioneer') exit() if returnT !=", "#caso especial 2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3'] =", "pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef,", "plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration ####################################### ######################################## simulation ####################################### # Before", "= -2*dx coef['b0'] = yi coef['b3'] = dy - coef['b1'] - coef['b2'] elif", "coef['a3'] = coef['a1'] - 2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] =", "to be executed just once, at simulation start: # # simRemoteApi.start(19999) # #", "livre coef['b2'] = 0 #coef. 
livre coef['a0'] = xi coef['a1'] = 0 coef['a2']", "2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x", "def pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y", "returnCode != 0: # print('Error: fail to send the path point to the", "thf => ponto e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os", "#coef. livre coef['b2'] = 0 #coef. livre coef['a0'] = xi coef['a1'] = 0", "can guarantee this with (for example): sim.simxGetPingTime(clientID) # Now close the connection to", "coeficientes do pol. de grau 3) # return: coef def pathComputer(xi, yi, thi,", "alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy -", "livre coef['b2'] = 0 #coef. livre (qualquer valor aqui) coef['a0'] = xi coef['a2']", "if returnT != 0: print('ERRO: Falha em GetObjectHandle para o Target') exit() ########################################", "(2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso", "Falha em GetObjectHandle para o Pioneer') exit() if returnT != 0: print('ERRO: Falha", "xf - xi dy = yf - yi coef = dict() thi_test =", "- dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso especial", "livre coef['a2'] = 0 #coef. livre coef['a0'] = xi coef['a3'] = dx -", "path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob',", "0.331 #wheel axis distance r_w = 0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r", "coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx -", "as this file,') print ('or appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print", "alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] = 0 #coef. 
livre", "#wheel axis distance r_w = 0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r =", "delta) thf_test = (np.pi/2.0 - delta) < thf < (np.pi/2.0 + delta) if", "matplotlib.pyplot as plt import numpy as np import sys import ctypes #################################### Functions", "= 0 #coef. livre coef['a0'] = xi coef['a3'] = dx - coef['a1'] -", "= coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l +", "ctypes #################################### Functions definition ############################################## # retorna Wd, We d = 0.331 #wheel", "('\"sim.py\" could not be imported. This means very probably that') print ('either \"sim.py\"", "Before closing the connection to CoppeliaSim, make sure that the last command sent", "############################################### time.sleep(0.5) resA,resP = 1, 1 resT = 1 while (resA != 0)", "v_l = (v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w return omega_right, omega_left def", "gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration ####################################### ######################################## simulation #######################################", "= alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso", "call to simxFinish at the end! try: import sim except: print ('--------------------------------------------------------------') print", "= yi coef['b3'] = dy - coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial", "alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx #coef. 
livre coef['a2'] =", "print ('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot as plt import numpy as", "+ 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID =", "np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0):", "def pathComputer(xi, yi, thi, xf, yf, thf): delta = 0.001 dx = xf", "print ('either \"sim.py\" or the remoteApi library could not be found.') print ('Make", "< (np.pi/2.0 + delta) if (thi_test) and (thf_test): print('Caso Especial #1') # caso", "coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3']", "# caso especial 1 coef['b1'] = dy #coef. 
livre coef['b2'] = 0 #coef.", "retorna Wd, We d = 0.331 #wheel axis distance r_w = 0.09751 #wheel", "+ coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1']", "not be found.') print ('Make sure both are in the same folder as", "xf, yf, thf => ponto e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario", "livre coef['a0'] = xi coef['a1'] = 0 coef['a2'] = 3*dx coef['a3'] = -2*dx", "livre (qualquer valor aqui) coef['a0'] = xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3']", "- alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3']", "numpy as np import sys import ctypes #################################### Functions definition ############################################## # retorna", "coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] +", "('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot as plt import numpy as np", "following command # to be executed just once, at simulation start: # #", "= dx - coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2']", "#2') #caso especial 2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3']", "the remoteApi library could not be found.') print ('Make sure both are in", "= sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot)", "to CoppeliaSim, make sure that the last command sent out had time to", "xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0'] =", "livre coef['b3'] = 0 #coef. 
livre (qualquer valor aqui) coef['a0'] = xi coef['a1']", "valor aqui) coef['a0'] = xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] = coef['a1']", "sim.simx_opmode_oneshot) if returnCode != 0: # print('Error: fail to send the path point", "plt.show() ####################################### graph configuration ####################################### ######################################## simulation ####################################### # Before closing the connection", "else: print('Caso Geral') #caso geral alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1'] =", "l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1],", "time.sleep(0.5) resA,resP = 1, 1 resT = 1 while (resA != 0) and", "file,') print ('or appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import", "coef['a0'] = xi coef['a1'] = 0 coef['a2'] = dx - coef['a3'] coef['b0'] =", "as np import sys import ctypes #################################### Functions definition ############################################## # retorna Wd,", "alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso geral alpha_i = np.tan(thi) alpha_f =", "ponto e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do", "= 0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l = (v-d*omega)", "coef['a2'] = 0 #coef. livre coef['a0'] = xi coef['a3'] = dx - coef['a1']", "sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not be imported. 
This means very", "= sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha em", "coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] =", "# print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k',", "#coef. livre coef['a0'] = xi coef['a1'] = 0 coef['a2'] = 3*dx coef['a3'] =", "################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na conexão') exit() print('Conectado!')", "conexão') exit() print('Conectado!') # handlers of motors and the robot returnR, robot_handle =", "then start simulation, and run this program. # # IMPORTANT: for each successful", "('--------------------------------------------------------------') print ('\"sim.py\" could not be imported. This means very probably that') print", "= pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End')", "GetObjectHandle para o Pioneer') exit() if returnT != 0: print('ERRO: Falha em GetObjectHandle", "- 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1']", "as plt import numpy as np import sys import ctypes #################################### Functions definition", "CoppeliaSim, make sure that the last command sent out had time to arrive.", "coef['a3']*l**3 y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] +", "= 0.07): for i in path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes", "0 #coef. 
livre coef['a0'] = xi coef['a3'] = dx - coef['a1'] - coef['a2']", "of motors and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait)", "# xf, yf, thf => ponto e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3}", "plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration ####################################### ######################################## simulation ####################################### #", "to simxStart, there # should be a corresponding call to simxFinish at the", "run this program. # # IMPORTANT: for each successful call to simxStart, there", "= sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na conexão') exit() print('Conectado!') # handlers", "pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w omega_left =", "coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l +", "add following command # to be executed just once, at simulation start: #", "omega_left = v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07): for i", "the same folder as this file,') print ('or appropriately adjust the file \"sim.py\"')", "# return: coef def pathComputer(xi, yi, thi, xf, yf, thf): delta = 0.001", "CoppeliaSim scene, add following command # to be executed just once, at simulation", "(np.pi/2.0 + delta) thf_test = (np.pi/2.0 - delta) < thf < (np.pi/2.0 +", "#coef. 
livre (qualquer valor aqui) coef['a0'] = xi coef['a1'] = 0 coef['a2'] =", "=> ponto e orientação inicial # xf, yf, thf => ponto e orientação", "de grau 3) # return: coef def pathComputer(xi, yi, thi, xf, yf, thf):", "- coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx)", "dx - coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3']", "radius def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w", "\"sim.py\" or the remoteApi library could not be found.') print ('Make sure both", "resT = 1 while (resA != 0) and (resP != 0) and (resT", "('Make sure both are in the same folder as this file,') print ('or", "could not be imported. This means very probably that') print ('either \"sim.py\" or", "na conexão') exit() print('Conectado!') # handlers of motors and the robot returnR, robot_handle", "clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na conexão') exit() print('Conectado!') #", "= (np.pi/2.0 - delta) < thi < (np.pi/2.0 + delta) thf_test = (np.pi/2.0", "x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] +", "('either \"sim.py\" or the remoteApi library could not be found.') print ('Make sure", "print ('') import time import matplotlib.pyplot as plt import numpy as np import", "= alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2']", "remoteApi library could not be found.') print ('Make sure both are in the", "(thf_test): print('Caso Especial #1') # caso especial 1 coef['b1'] = dy #coef. 
livre", "just once, at simulation start: # # simRemoteApi.start(19999) # # then start simulation,", "simxStart, there # should be a corresponding call to simxFinish at the end!", "= v_r/r_w omega_left = v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07):", "distance r_w = 0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l", "+ coef['a3']*l**3 y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1']", "and (resP != 0) and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1,", "start simulation, and run this program. # # IMPORTANT: for each successful call", "side running in CoppeliaSim: # in a child script of a CoppeliaSim scene,", "sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na conexão') exit() print('Conectado!') # handlers of", "print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path')", "coef = dict() thi_test = (np.pi/2.0 - delta) < thi < (np.pi/2.0 +", "xi coef['a1'] = 0 coef['a2'] = dx - coef['a3'] coef['b0'] = yi coef['b1']", "be executed just once, at simulation start: # # simRemoteApi.start(19999) # # then", "coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso geral alpha_i", "thf): delta = 0.001 dx = xf - xi dy = yf -", "sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha em GetObjectHandle", "path point to the simulator!') pass time.sleep(sleep_time) # xi, yi, thi => ponto", "print('Caso Especial #1') # caso especial 1 coef['b1'] = dy #coef. 
livre coef['b2']", "resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming)", "omega_left def send_path_4_drawing(path, sleep_time = 0.07): for i in path[:,0:2]: point2send = i", "# # then start simulation, and run this program. # # IMPORTANT: for", "0: print('ERRO: Falha em GetObjectHandle para o Pioneer') exit() if returnT != 0:", "should be a corresponding call to simxFinish at the end! try: import sim", "!= 0: print('ERRO: Falha em GetObjectHandle para o Target') exit() ######################################## variables ###############################################", "alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso geral", "+ alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso especial 3 alpha_i", "library could not be found.') print ('Make sure both are in the same", "= v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07): for i in", "2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3']", "the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR !=", "plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration ####################################### ######################################## simulation", "# simRemoteApi.start(19999) # # then start simulation, and run this program. 
# #", "sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha em GetObjectHandle para o Pioneer') exit()", "coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial #2') #caso especial 2 alpha_f =", "coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso", "if returnR != 0: print('ERRO: Falha em GetObjectHandle para o Pioneer') exit() if", "coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500)", "alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso especial 3 alpha_i =", "each successful call to simxStart, there # should be a corresponding call to", "be a corresponding call to simxFinish at the end! try: import sim except:", "= (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial #3')", "plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path)", "coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] +", "successful call to simxStart, there # should be a corresponding call to simxFinish", "sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: # print('Error: fail to send", "# Before closing the connection to CoppeliaSim, make sure that the last command", "thf_test = (np.pi/2.0 - delta) < thf < (np.pi/2.0 + delta) if (thi_test)", "- alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l +", "return omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07): for i in path[:,0:2]: point2send", "elif thi_test: print('Caso Especial #2') #caso especial 2 alpha_f = np.tan(thf) coef['a3'] =", "coef['a1'] = 0 coef['a2'] = dx - coef['a3'] coef['b0'] = yi coef['b1'] =", 
"import numpy as np import sys import ctypes #################################### Functions definition ############################################## #", "(resP != 0) and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming)", "coef['a3'] = -dx/2.0 #coef. livre coef['b3'] = 0 #coef. livre (qualquer valor aqui)", "packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes,", "at the end! try: import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not", "valor aqui) coef['a0'] = xi coef['a1'] = 0 coef['a2'] = dx - coef['a3']", "1, 1 resT = 1 while (resA != 0) and (resP != 0)", "!= 0) and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos", "executed just once, at simulation start: # # simRemoteApi.start(19999) # # then start", "3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx -", "resA,resP = 1, 1 resT = 1 while (resA != 0) and (resP", "path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph", "coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f", "xi coef['a3'] = dx - coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1'] =", "connection to CoppeliaSim, make sure that the last command sent out had time", "time to arrive. 
You can guarantee this with (for example): sim.simxGetPingTime(clientID) # Now", "yf - yi coef = dict() thi_test = (np.pi/2.0 - delta) < thi", "at simulation start: # # simRemoteApi.start(19999) # # then start simulation, and run", "= yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f -", "# in a child script of a CoppeliaSim scene, add following command #", "3*dx/2.0 #coef. livre coef['b2'] = 0 #coef. livre (qualquer valor aqui) coef['a0'] =", "have the server side running in CoppeliaSim: # in a child script of", "= 3*dx coef['a3'] = -2*dx coef['b0'] = yi coef['b3'] = dy - coef['b1']", "print ('\"sim.py\" could not be imported. This means very probably that') print ('either", "pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1],", "coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2,", "delta = 0.001 dx = xf - xi dy = yf - yi", "3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ##################################################", "import time import matplotlib.pyplot as plt import numpy as np import sys import", "livre (qualquer valor aqui) coef['a0'] = xi coef['a1'] = 0 coef['a2'] = dx", "resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf", "resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef", "0: print('ERRO: Falha em GetObjectHandle para o Target') exit() ######################################## variables ############################################### time.sleep(0.5)", "imported. 
This means very probably that') print ('either \"sim.py\" or the remoteApi library", "plt.legend() plt.show() ####################################### graph configuration ####################################### ######################################## simulation ####################################### # Before closing the", "- xi dy = yf - yi coef = dict() thi_test = (np.pi/2.0", "3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef,", "- coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] +", "0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l = (v-d*omega) omega_right", "try: import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not be imported. This", "coef['b0'] = yi coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2']", "< thi < (np.pi/2.0 + delta) thf_test = (np.pi/2.0 - delta) < thf", "for each successful call to simxStart, there # should be a corresponding call", "+ delta) if (thi_test) and (thf_test): print('Caso Especial #1') # caso especial 1", "return: coef def pathComputer(xi, yi, thi, xf, yf, thf): delta = 0.001 dx", "v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07): for i in path[:,0:2]:", "= dx - coef['a3'] coef['b0'] = yi coef['b1'] = 2*(dy - alpha_f*dx) -", "def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w omega_left", "coef['a0'] = xi coef['a3'] = dx - coef['a1'] - coef['a2'] coef['b0'] = yi", "= 0 #coef. livre (qualquer valor aqui) coef['a0'] = xi coef['a1'] = 0", "that the last command sent out had time to arrive. 
You can guarantee", "(resA != 0) and (resP != 0) and (resT != 0): resA,ang =", "# handlers of motors and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle", "probably that') print ('either \"sim.py\" or the remoteApi library could not be found.')", "thf < (np.pi/2.0 + delta) if (thi_test) and (thf_test): print('Caso Especial #1') #", "- alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x = coef['a0'] +", "yi coef = dict() thi_test = (np.pi/2.0 - delta) < thi < (np.pi/2.0", "= 1 while (resA != 0) and (resP != 0) and (resT !=", "= dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso geral alpha_i =", "0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1,", "coef['b0'] = yi coef['b3'] = dy - coef['b1'] - coef['b2'] elif thi_test: print('Caso", "both are in the same folder as this file,') print ('or appropriately adjust", "dx #coef. livre coef['a2'] = 0 #coef. livre coef['a0'] = xi coef['a3'] =", "This means very probably that') print ('either \"sim.py\" or the remoteApi library could", "i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\",", "coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0'] = yi", "coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol. de grau 3) #", "arrive. You can guarantee this with (for example): sim.simxGetPingTime(clientID) # Now close the", "= dx #coef. livre coef['a2'] = 0 #coef. 
livre coef['a0'] = xi coef['a3']", "could not be found.') print ('Make sure both are in the same folder", "coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return", "raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode", "axis distance r_w = 0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r = (v+d*omega)", "= 3*dx/2.0 #coef. livre coef['b2'] = 0 #coef. livre (qualquer valor aqui) coef['a0']", "thi, xf, yf, thf): delta = 0.001 dx = xf - xi dy", "the path point to the simulator!') pass time.sleep(sleep_time) # xi, yi, thi =>", "exit() print('Conectado!') # handlers of motors and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait)", "= sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf =", "pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef, l) #", "You can guarantee this with (for example): sim.simxGetPingTime(clientID) # Now close the connection", "\"sim.py\"') print ('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot as plt import numpy", "+ 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if", "alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] -", "os coeficientes do pol. 
de grau 3) # return: coef def pathComputer(xi, yi,", "####################################### graph configuration ####################################### ######################################## simulation ####################################### # Before closing the connection to", "sure both are in the same folder as this file,') print ('or appropriately", "simulation ####################################### # Before closing the connection to CoppeliaSim, make sure that the", "3*dx coef['a3'] = -2*dx coef['b0'] = yi coef['b3'] = dy - coef['b1'] -", "-2*dx coef['b0'] = yi coef['b3'] = dy - coef['b1'] - coef['b2'] elif thi_test:", "-dx/2.0 #coef. livre coef['b3'] = 0 #coef. livre (qualquer valor aqui) coef['a0'] =", "####################################### # Before closing the connection to CoppeliaSim, make sure that the last", "coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] = 0 #coef. livre (qualquer valor aqui)", "= 0 #coef. livre (qualquer valor aqui) coef['a0'] = xi coef['a2'] = 3*dx", "< (np.pi/2.0 + delta) thf_test = (np.pi/2.0 - delta) < thf < (np.pi/2.0", "sure to have the server side running in CoppeliaSim: # in a child", "print('Error: fail to send the path point to the simulator!') pass time.sleep(sleep_time) #", "o Pioneer') exit() if returnT != 0: print('ERRO: Falha em GetObjectHandle para o", "np import sys import ctypes #################################### Functions definition ############################################## # retorna Wd, We", "+ coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2)", "pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0],", "alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3'] = 0 #coef. 
livre", "- alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy", "import sys import ctypes #################################### Functions definition ############################################## # retorna Wd, We d", "do pol. de grau 3) # return: coef def pathComputer(xi, yi, thi, xf,", "omega_right = v_r/r_w omega_left = v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time =", "sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf = [posT[0],", "('or appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import time import", "the last command sent out had time to arrive. You can guarantee this", "= xi coef['a3'] = dx - coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1']", "robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0],", "of a CoppeliaSim scene, add following command # to be executed just once,", "simxFinish at the end! 
try: import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could", "2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5)", "np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1],", "- alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3']", "#caso geral alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx #coef. livre", "2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) +", "= coef['a1'] - 2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy", "com os coeficientes do pol. de grau 3) # return: coef def pathComputer(xi,", "return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID !=", "Falha em GetObjectHandle para o Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP =", "coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID", "(thi_test) and (thf_test): print('Caso Especial #1') # caso especial 1 coef['b1'] = dy", "not be imported. 
This means very probably that') print ('either \"sim.py\" or the", "v_r/r_w omega_left = v_l/r_w return omega_right, omega_left def send_path_4_drawing(path, sleep_time = 0.07): for", "handlers of motors and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle =", "program. # # IMPORTANT: for each successful call to simxStart, there # should", "para o Pioneer') exit() if returnT != 0: print('ERRO: Falha em GetObjectHandle para", "robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha", "thi_test = (np.pi/2.0 - delta) < thi < (np.pi/2.0 + delta) thf_test =", "= dy - coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial #2') #caso especial", "point to the simulator!') pass time.sleep(sleep_time) # xi, yi, thi => ponto e", "i in path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte *", "this program. # # IMPORTANT: for each successful call to simxStart, there #", "Pioneer') exit() if returnT != 0: print('ERRO: Falha em GetObjectHandle para o Target')", "IMPORTANT: for each successful call to simxStart, there # should be a corresponding", "2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] -", "pol. 
de grau 3) # return: coef def pathComputer(xi, yi, thi, xf, yf,", "pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path =", "plt.grid(True) plt.legend() plt.show() ####################################### graph configuration ####################################### ######################################## simulation ####################################### # Before closing", "- 2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso especial 3 alpha_i = np.tan(thi)", "print('Caso Geral') #caso geral alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx", "time.sleep(sleep_time) # xi, yi, thi => ponto e orientação inicial # xf, yf,", "a child script of a CoppeliaSim scene, add following command # to be", "final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol. de grau", "be found.') print ('Make sure both are in the same folder as this", "last command sent out had time to arrive. You can guarantee this with", "!= 0): print('Falha na conexão') exit() print('Conectado!') # handlers of motors and the", "the server side running in CoppeliaSim: # in a child script of a", "fail to send the path point to the simulator!') pass time.sleep(sleep_time) # xi,", "make sure that the last command sent out had time to arrive. You", "(resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming)", "had time to arrive. 
You can guarantee this with (for example): sim.simxGetPingTime(clientID) #", "+ coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l", "3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID", "r_w = 0.09751 #wheel radius def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l =", "sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if", "xi coef['a1'] = 0 coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0'] = yi", "dy = yf - yi coef = dict() thi_test = (np.pi/2.0 - delta)", "1 while (resA != 0) and (resP != 0) and (resT != 0):", "= pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path", "out had time to arrive. 
You can guarantee this with (for example): sim.simxGetPingTime(clientID)", "alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3']", "returnR != 0: print('ERRO: Falha em GetObjectHandle para o Pioneer') exit() if returnT", "in path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData)", "# Make sure to have the server side running in CoppeliaSim: # in", "np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) #####################################", "!= 0): resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT=", "pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape)", "= 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def", "delta) < thi < (np.pi/2.0 + delta) thf_test = (np.pi/2.0 - delta) <", "returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO:", "- 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef, l):", "GetObjectHandle para o Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP = 1, 1", "para o Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP = 1, 1 resT", "or the remoteApi library could not be found.') print ('Make sure both are", "ang[2]] pf = [posT[0], 
posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2])", "+ alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2']", "path[0,1], 'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho", "np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] = 0 #coef. livre (qualquer valor", "running in CoppeliaSim: # in a child script of a CoppeliaSim scene, add", "that') print ('either \"sim.py\" or the remoteApi library could not be found.') print", "coef['b1'] = dy #coef. livre coef['b2'] = 0 #coef. livre coef['a0'] = xi", "Functions definition ############################################## # retorna Wd, We d = 0.331 #wheel axis distance", "e orientação inicial # xf, yf, thf => ponto e orientação final #", "(np.pi/2.0 - delta) < thf < (np.pi/2.0 + delta) if (thi_test) and (thf_test):", "2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso especial 3 alpha_i = np.tan(thi) coef['a1']", "= sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: # print('Error: fail to", "0 #coef. livre coef['a0'] = xi coef['a1'] = 0 coef['a2'] = 3*dx coef['a3']", "pf = [posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:')", "be imported. This means very probably that') print ('either \"sim.py\" or the remoteApi", "!= 0) and (resP != 0) and (resT != 0): resA,ang = sim.simxGetObjectOrientation(clientID,", "caso especial 1 coef['b1'] = dy #coef. livre coef['b2'] = 0 #coef. 
livre", "em GetObjectHandle para o Pioneer') exit() if returnT != 0: print('ERRO: Falha em", "coef def pathComputer(xi, yi, thi, xf, yf, thf): delta = 0.001 dx =", "y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th= np.arctan2(coef['b1'] + 2*coef['b2']*l", "3) # return: coef def pathComputer(xi, yi, thi, xf, yf, thf): delta =", "sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0]", "0 #coef. livre (qualquer valor aqui) coef['a0'] = xi coef['a1'] = 0 coef['a2']", "= (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode !=", "# retorna Wd, We d = 0.331 #wheel axis distance r_w = 0.09751", "#coef. livre coef['a0'] = xi coef['a3'] = dx - coef['a1'] - coef['a2'] coef['b0']", "= 3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0'] = yi coef['b1']", "o Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP = 1, 1 resT =", "print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1], 'or',", "= 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx - dy)", "0.001 dx = xf - xi dy = yf - yi coef =", "to the simulator!') pass time.sleep(sleep_time) # xi, yi, thi => ponto e orientação", "command sent out had time to arrive. You can guarantee this with (for", "sys import ctypes #################################### Functions definition ############################################## # retorna Wd, We d =", "print ('--------------------------------------------------------------') print ('\"sim.py\" could not be imported. 
This means very probably that')", "path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode", "+ 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f", "= sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi", "= 0.001 dx = xf - xi dy = yf - yi coef", "and run this program. # # IMPORTANT: for each successful call to simxStart,", "aqui) coef['a0'] = xi coef['a1'] = 0 coef['a2'] = dx - coef['a3'] coef['b0']", "server side running in CoppeliaSim: # in a child script of a CoppeliaSim", "l): x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0']", "sent out had time to arrive. You can guarantee this with (for example):", "<gh_stars>1-10 # Make sure to have the server side running in CoppeliaSim: #", "= yf - yi coef = dict() thi_test = (np.pi/2.0 - delta) <", "alpha_f = np.tan(thf) coef['a1'] = dx #coef. livre coef['a2'] = 0 #coef. livre", "We d = 0.331 #wheel axis distance r_w = 0.09751 #wheel radius def", "posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l =", "yi, thi => ponto e orientação inicial # xf, yf, thf => ponto", "#coef. 
livre (qualquer valor aqui) coef['a0'] = xi coef['a2'] = 3*dx - 2*coef['a1']", "coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef", "- coef['b2'] elif thi_test: print('Caso Especial #2') #caso especial 2 alpha_f = np.tan(thf)", "send_path_4_drawing(path, sleep_time = 0.07): for i in path[:,0:2]: point2send = i packedData =", "yf, thf): delta = 0.001 dx = xf - xi dy = yf", "'or', label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado')", "pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef, l)", "= yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2'] else:", "call to simxStart, there # should be a corresponding call to simxFinish at", "raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: # print('Error: fail to send the path", "= 1, 1 resT = 1 while (resA != 0) and (resP !=", "np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3'] = 0 #coef. livre (qualquer valor", "grau 3) # return: coef def pathComputer(xi, yi, thi, xf, yf, thf): delta", "dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso geral alpha_i = np.tan(thi)", "to arrive. 
You can guarantee this with (for example): sim.simxGetPingTime(clientID) # Now close", "Make sure to have the server side running in CoppeliaSim: # in a", "(qualquer valor aqui) coef['a0'] = xi coef['a1'] = 0 coef['a2'] = dx -", "= xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0']", "= xi coef['a1'] = 0 coef['a2'] = dx - coef['a3'] coef['b0'] = yi", "+ coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test:", "# to be executed just once, at simulation start: # # simRemoteApi.start(19999) #", "dx = xf - xi dy = yf - yi coef = dict()", "there # should be a corresponding call to simxFinish at the end! try:", "(np.pi/2.0 - delta) < thi < (np.pi/2.0 + delta) thf_test = (np.pi/2.0 -", "coef['b2'] else: print('Caso Geral') #caso geral alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1']", "= np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3'] = 0 #coef. livre (qualquer", "delta) < thf < (np.pi/2.0 + delta) if (thi_test) and (thf_test): print('Caso Especial", ": {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol. 
de grau 3) # return:", "coef['a1'] - 2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy -", "yf, thf => ponto e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com", "- delta) < thi < (np.pi/2.0 + delta) thf_test = (np.pi/2.0 - delta)", "- coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial #2') #caso especial 2 alpha_f", "dict() thi_test = (np.pi/2.0 - delta) < thi < (np.pi/2.0 + delta) thf_test", "dy - coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial #2') #caso especial 2", "pathComputer(xi, yi, thi, xf, yf, thf): delta = 0.001 dx = xf -", "aqui) coef['a0'] = xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] -", "- alpha_i*coef['a1'] - coef['b2'] else: print('Caso Geral') #caso geral alpha_i = np.tan(thi) alpha_f", "closing the connection to CoppeliaSim, make sure that the last command sent out", "and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR", "= (v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w return omega_right, omega_left def send_path_4_drawing(path,", "to send the path point to the simulator!') pass time.sleep(sleep_time) # xi, yi,", "+ coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3", "alpha_f*coef['a3'] + coef['b3'] coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif", "the file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot as plt", "em GetObjectHandle para o Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP = 1,", "=> ponto e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes", "the end! 
try: import sim except: print ('--------------------------------------------------------------') print ('\"sim.py\" could not be", "= 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx", "once, at simulation start: # # simRemoteApi.start(19999) # # then start simulation, and", "= i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID,", "pi = [pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1],", "# xi, yi, thi => ponto e orientação inicial # xf, yf, thf", "if (thi_test) and (thf_test): print('Caso Especial #1') # caso especial 1 coef['b1'] =", "('') import time import matplotlib.pyplot as plt import numpy as np import sys", "0: # print('Error: fail to send the path point to the simulator!') pass", "found.') print ('Make sure both are in the same folder as this file,')", "sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi =", "means very probably that') print ('either \"sim.py\" or the remoteApi library could not", "2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f -", "to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na conexão')", "time import matplotlib.pyplot as plt import numpy as np import sys import ctypes", "plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]')", "= (np.pi/2.0 - delta) < thf < (np.pi/2.0 + delta) if (thi_test) and", "(qualquer 
valor aqui) coef['a0'] = xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] =", "!= 0: # print('Error: fail to send the path point to the simulator!')", "delta) if (thi_test) and (thf_test): print('Caso Especial #1') # caso especial 1 coef['b1']", "# then start simulation, and run this program. # # IMPORTANT: for each", "simRemoteApi.start(19999) # # then start simulation, and run this program. # # IMPORTANT:", "folder as this file,') print ('or appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------')", "v_r = (v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w return", "def send_path_4_drawing(path, sleep_time = 0.07): for i in path[:,0:2]: point2send = i packedData", "simulation start: # # simRemoteApi.start(19999) # # then start simulation, and run this", "omega): v_r = (v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w", "pass time.sleep(sleep_time) # xi, yi, thi => ponto e orientação inicial # xf,", "coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3 th=", "coef['a0'] = xi coef['a1'] = 0 coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0']", "(2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x = coef['a0']", "xf, yf, thf): delta = 0.001 dx = xf - xi dy =", "print('Falha na conexão') exit() print('Conectado!') # handlers of motors and the robot returnR,", "print('Caso Especial #3') #caso especial 3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef.", "for i in path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte", "= 0 coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0'] = yi coef['b3'] =", "= -dx/2.0 #coef. livre coef['b3'] = 0 #coef. 
livre (qualquer valor aqui) coef['a0']", "##################################### Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha", "len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: # print('Error:", "- 2*dx coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1']", "= (v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w return omega_right,", "command # to be executed just once, at simulation start: # # simRemoteApi.start(19999)", "= [posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef)", "configuration ####################################### ######################################## simulation ####################################### # Before closing the connection to CoppeliaSim, make", "the connection to CoppeliaSim, make sure that the last command sent out had", "print('ERRO: Falha em GetObjectHandle para o Target') exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP", "simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na conexão') exit()", "in CoppeliaSim: # in a child script of a CoppeliaSim scene, add following", "xi, yi, thi => ponto e orientação inicial # xf, yf, thf =>", "print ('or appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import time", "coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial", "thi < (np.pi/2.0 + delta) thf_test = (np.pi/2.0 - delta) < thf <", 
"sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef =", "e orientação final # coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol.", "# print('Error: fail to send the path point to the simulator!') pass time.sleep(sleep_time)", "= sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha em GetObjectHandle para o Pioneer')", "d = 0.331 #wheel axis distance r_w = 0.09751 #wheel radius def pioneer_robot_model(v,", "= yi coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3'] coef['b2'] =", "0): print('Falha na conexão') exit() print('Conectado!') # handlers of motors and the robot", "{a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol. de grau 3) # return: coef", "coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 +", "2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef. livre coef['b3'] = 0 #coef.", "- (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x =", "alpha_i)*coef['a1'] - alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l", "(ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0:", "target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0: print('ERRO: Falha em GetObjectHandle para o", "#caso especial 3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] =", "appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot", "0 #coef. 
livre (qualquer valor aqui) coef['a0'] = xi coef['a2'] = 3*dx -", "coef['a0'] = xi coef['a2'] = 3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx", "[pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0],", "simulation, and run this program. # # IMPORTANT: for each successful call to", "corresponding call to simxFinish at the end! try: import sim except: print ('--------------------------------------------------------------')", "Wd, We d = 0.331 #wheel axis distance r_w = 0.09751 #wheel radius", "#1') # caso especial 1 coef['b1'] = dy #coef. livre coef['b2'] = 0", "np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx #coef. livre coef['a2'] = 0 #coef.", "yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2'] else: print('Caso", "return coef def pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 +", "especial 3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. livre coef['b2'] = 0", "######################################## variables ############################################### time.sleep(0.5) resA,resP = 1, 1 resT = 1 while (resA", "+ delta) thf_test = (np.pi/2.0 - delta) < thf < (np.pi/2.0 + delta)", "send the path point to the simulator!') pass time.sleep(sleep_time) # xi, yi, thi", "especial 1 coef['b1'] = dy #coef. livre coef['b2'] = 0 #coef. livre coef['a0']", "this with (for example): sim.simxGetPingTime(clientID) # Now close the connection to CoppeliaSim: sim.simxFinish(clientID)", "import matplotlib.pyplot as plt import numpy as np import sys import ctypes ####################################", "Especial #3') #caso especial 3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. 
livre", "* len(packedData)).from_buffer_copy(packedData) returnCode = sim.simxWriteStringStream(clientID, \"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: #", "= xf - xi dy = yf - yi coef = dict() thi_test", "coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy -", "coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] + coef['b1']*l +", "label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show()", "= coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] + coef['b1']*l", "plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() #######################################", "coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2']", "sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef = pathComputer(pi[0],", "import ctypes #################################### Functions definition ############################################## # retorna Wd, We d = 0.331", "coef['b2'] = 0 #coef. 
livre (qualquer valor aqui) coef['a0'] = xi coef['a2'] =", "thi_test: print('Caso Especial #2') #caso especial 2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0", "dx - coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] =", "definition ############################################## # retorna Wd, We d = 0.331 #wheel axis distance r_w", "(v+d*omega) v_l = (v-d*omega) omega_right = v_r/r_w omega_left = v_l/r_w return omega_right, omega_left", "yi coef['b1'] = alpha_i*coef['a1'] coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1']", "#################################### Functions definition ############################################## # retorna Wd, We d = 0.331 #wheel axis", "are in the same folder as this file,') print ('or appropriately adjust the", "- delta) < thf < (np.pi/2.0 + delta) if (thi_test) and (thf_test): print('Caso", "coef['a1'] = dx #coef. livre coef['a2'] = 0 #coef. livre coef['a0'] = xi", "graph configuration ####################################### ######################################## simulation ####################################### # Before closing the connection to CoppeliaSim,", "Especial #2') #caso especial 2 alpha_f = np.tan(thf) coef['a3'] = -dx/2.0 #coef. 
livre", "the simulator!') pass time.sleep(sleep_time) # xi, yi, thi => ponto e orientação inicial", "alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2'] return", "3*dx - 2*coef['a1'] coef['a3'] = coef['a1'] - 2*dx coef['b0'] = yi coef['b1'] =", "if (clientID != 0): print('Falha na conexão') exit() print('Conectado!') # handlers of motors", "send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend() plt.show() ####################################### graph configuration ####################################### ########################################", "exit() ######################################## variables ############################################### time.sleep(0.5) resA,resP = 1, 1 resT = 1 while", "label='Start') plt.plot(path[-1,0], path[-1,1], 'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]')", "thf_test: print('Caso Especial #3') #caso especial 3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0", "############################################## # retorna Wd, We d = 0.331 #wheel axis distance r_w =", "a corresponding call to simxFinish at the end! try: import sim except: print", "coef['b2'] = 0 #coef. livre coef['a0'] = xi coef['a1'] = 0 coef['a2'] =", "print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0], path[0,1],", "Especial #1') # caso especial 1 coef['b1'] = dy #coef. 
livre coef['b2'] =", "exit() if returnT != 0: print('ERRO: Falha em GetObjectHandle para o Target') exit()", "'ob', label='End') plt.plot(path[:,0], path[:,1], '-k', label='Path') send_path_4_drawing(path) plt.title('Caminho gerado') plt.ylabel('y[m]') plt.xlabel('x[m]') plt.grid(True) plt.legend()", "yi, thi, xf, yf, thf): delta = 0.001 dx = xf - xi", "= [pos[0], pos[1], ang[2]] pf = [posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2],", "# coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dicionario com os coeficientes do pol. de grau 3)", "+ coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3 y = coef['b0'] + coef['b1']*l + coef['b2']*l**2", "2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting to", "= dy #coef. livre coef['b2'] = 0 #coef. livre coef['a0'] = xi coef['a1']", "#wheel radius def pioneer_robot_model(v, omega): v_r = (v+d*omega) v_l = (v-d*omega) omega_right =", "Geral') #caso geral alpha_i = np.tan(thi) alpha_f = np.tan(thf) coef['a1'] = dx #coef.", "coef['a3'] = -2*dx coef['b0'] = yi coef['b3'] = dy - coef['b1'] - coef['b2']", "\"path_coord\", raw_bytes, sim.simx_opmode_oneshot) if returnCode != 0: # print('Error: fail to send the", "- alpha_i)*coef['a1'] + alpha_f*coef['a2'] coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1']", "Connecting to simulator ################################################## clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5) if (clientID != 0): print('Falha na", "very probably that') print ('either \"sim.py\" or the remoteApi library could not be", "yi coef['b3'] = dy - coef['b1'] - coef['b2'] elif thi_test: print('Caso Especial #2')", "ponto e orientação inicial # xf, yf, thf => ponto e orientação final", "coef['a3'] = dx - coef['a1'] - coef['a2'] coef['b0'] = yi coef['b1'] = alpha_i*coef['a1']", "-1, sim.simx_opmode_streaming) resP,pos = 
sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1],", "scene, add following command # to be executed just once, at simulation start:", "file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot as plt import", "this file,') print ('or appropriately adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print ('')", "plt import numpy as np import sys import ctypes #################################### Functions definition ##############################################", "print ('Make sure both are in the same folder as this file,') print", "#coef. livre coef['b3'] = 0 #coef. livre (qualquer valor aqui) coef['a0'] = xi", "alpha_f*coef['a2'] return coef def pathGenerator(coef, l): x = coef['a0'] + coef['a1']*l + coef['a2']*l**2", "dy) + alpha_f*coef['a3'] - 2*coef['b3'] elif thf_test: print('Caso Especial #3') #caso especial 3", "point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData) returnCode =", "1 resT = 1 while (resA != 0) and (resP != 0) and", "print('Conectado!') # handlers of motors and the robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT,", "(dicionario com os coeficientes do pol. 
de grau 3) # return: coef def", "# # IMPORTANT: for each successful call to simxStart, there # should be", "guarantee this with (for example): sim.simxGetPingTime(clientID) # Now close the connection to CoppeliaSim:", "0.07): for i in path[:,0:2]: point2send = i packedData = sim.simxPackFloats(point2send.flatten()) raw_bytes =", "####################################### ######################################## simulation ####################################### # Before closing the connection to CoppeliaSim, make sure", "sim.simx_opmode_streaming) resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming) resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming) pi = [pos[0], pos[1], ang[2]]", "inicial # xf, yf, thf => ponto e orientação final # coef :", "adjust the file \"sim.py\"') print ('--------------------------------------------------------------') print ('') import time import matplotlib.pyplot as", "[posT[0], posT[1],0.0] coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2]) print('Coeficientes:') print(coef) l", "robot returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait) returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait) if returnR != 0:", "pf[2]) print('Coeficientes:') print(coef) l = np.linspace(0,1,500) path = pathGenerator(coef, l) # print(path.shape) plt.plot(path[0,0],", "livre coef['a0'] = xi coef['a3'] = dx - coef['a1'] - coef['a2'] coef['b0'] =", "returnT != 0: print('ERRO: Falha em GetObjectHandle para o Target') exit() ######################################## variables", "+ 2*coef['b2']*l + 3*coef['b3']*l**2, coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2) return np.array(list(zip(x,y,th)),dtype=float) ##################################### Connecting", "#3') #caso especial 3 alpha_i = np.tan(thi) coef['a1'] = 3*dx/2.0 #coef. 
livre coef['b2']", "coef['a1'] = 0 coef['a2'] = 3*dx coef['a3'] = -2*dx coef['b0'] = yi coef['b3']" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "the time interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i,", "KIND, either express or implied. # See the License for the specific language", "and # limitations under the License. import numpy as np def remove_bz_offset(b_mms): \"\"\"", "Unless required by applicable law or agreed to in writing, software # distributed", "limitations under the License. import numpy as np def remove_bz_offset(b_mms): \"\"\" Remove offset", "interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in", "# Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "License. # You may obtain a copy of the License at # #", "2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "law or agreed to in writing, software # distributed under the License is", "\"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms): b_xyz[:,", "computed using the time interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682])", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "Bz. 
The offset is computed using the time interval [\"\",\"\"] \"\"\" offset =", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "time interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz", "[\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms):", "\"\"\" Remove offset on Bz. The offset is computed using the time interval", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "= np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms): b_xyz[:, 2] -=", "ANY KIND, either express or implied. # See the License for the specific", "remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. The offset is computed using the time", "language governing permissions and # limitations under the License. import numpy as np", "offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms): b_xyz[:, 2]", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "License. import numpy as np def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. The", "governing permissions and # limitations under the License. import numpy as np def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "numpy as np def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. 
The offset is", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "using the time interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547, -0.05232682]) for", "import numpy as np def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. The offset", "<gh_stars>0 # Copyright 2020 <NAME> # # Licensed under the Apache License, Version", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms): b_xyz[:, 2] -= offset[i] return b_mms", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "OF ANY KIND, either express or implied. # See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", "offset on Bz. The offset is computed using the time interval [\"\",\"\"] \"\"\"", "# you may not use this file except in compliance with the License.", "Remove offset on Bz. The offset is computed using the time interval [\"\",\"\"]", "permissions and # limitations under the License. import numpy as np def remove_bz_offset(b_mms):", "The offset is computed using the time interval [\"\",\"\"] \"\"\" offset = np.array([0.,", "agreed to in writing, software # distributed under the License is distributed on", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "(the \"License\"); # you may not use this file except in compliance with", "def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. The offset is computed using the", "on Bz. The offset is computed using the time interval [\"\",\"\"] \"\"\" offset", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# limitations under the License. import numpy as np def remove_bz_offset(b_mms): \"\"\" Remove", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "specific language governing permissions and # limitations under the License. import numpy as", "offset is computed using the time interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924,", "file except in compliance with the License. 
# You may obtain a copy", "is computed using the time interval [\"\",\"\"] \"\"\" offset = np.array([0., 0.06997924, 0.11059547,", "np def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. The offset is computed using", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the License. # You may obtain a copy of the License at #", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms): b_xyz[:, 2] -= offset[i] return", "the License. import numpy as np def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz.", "applicable law or agreed to in writing, software # distributed under the License", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "under the License. import numpy as np def remove_bz_offset(b_mms): \"\"\" Remove offset on", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "as np def remove_bz_offset(b_mms): \"\"\" Remove offset on Bz. The offset is computed", "the specific language governing permissions and # limitations under the License. import numpy", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "np.array([0., 0.06997924, 0.11059547, -0.05232682]) for i, b_xyz in enumerate(b_mms): b_xyz[:, 2] -= offset[i]", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE", "'../..' verboseMode = False testMode = False def exitProgram(code): if not testMode: exit(code)", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES", "python does not work when called from IDE on Windows) # # Copyright", "\"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err: # print help information and exit:", "opts: if o in (\"-a\", \"--arch\"): architecture = a.lower() if o in (\"-t\",", "SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT", "= (os.system(\"cmp -s \" + outputDirName + \"config-tmp \" + outputDirName + \"config\")", "sys.stderr.write(\" -c, --continue Continue on errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print this", "\"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err: # print help information", "without # modification, are permitted provided that the following conditions are met: #", "sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode = True elif o in (\"-h\",", "os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS", "# \\ is special character, creates problems in makefile where this path is", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "= 'pc' targetOS = 'mansos' pathToOS = '../..' 
verboseMode = False testMode =", "as e: pass with open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace", "retain the above copyright notice, # this list of conditions and the following", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "o in (\"-c\", \"--continue\"): testMode = True if len(args): inputFileName = args[0] args", "have installed modules:\" print (\"Cannot run SEAL parser:\") if not plyModuleOK: print (\"\\tPLY", "if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if", "<target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to file,", "\"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's config file", "shutil inputFileName = 'test.sl' outputFileName = 'main.c' architecture = 'testarch' #architecture = 'msp430'", "targetOS = 'mansos' pathToOS = '../..' 
verboseMode = False testMode = False def", "+ \"config\", outputDirName + \"config-tmp\") except IOError as e: try: os.remove(outputDirName + \"config-tmp\")", "\"config-tmp \" + outputDirName + \"config\") == 0) except: isSame = False if", "in (\"-c\", \"--continue\"): testMode = True if len(args): inputFileName = args[0] args =", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #", "showHelp = True elif o in (\"-o\", \"--output\"): outputFileName = a elif o", "+= os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname =", "THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF", "False showHelp = False for o, a in opts: if o in (\"-a\",", "def parseCommandLine(argv): global inputFileName global outputFileName global architecture global verboseMode global testMode global", "\" + release + \" (Release date: \" + date + \")\") sys.exit(0)", "elif o in (\"-V\", \"--verbose\"): verboseMode = True elif o in (\"-h\", \"--help\"):", "LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT", "inputFileName global outputFileName global architecture global verboseMode global testMode global pathToOS try: opts,", "outputDirName + \"config-tmp\") except IOError as e: try: os.remove(outputDirName + \"config-tmp\") except OSError", "os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'),", "are permitted provided that the following conditions are met: # * Redistributions of", "# replace the config file only if different: saves rebuiding time. 
try: isSame", "SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os,", "verboseMode global testMode global pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\",", "f.readlines() f.close() if len(lines) > 0: release = lines[0].strip() if len(lines) > 1:", "pathToOS.strip('\\\\'); # \\ is special character, creates problems in makefile where this path", "= open(versionFile, \"r\") lines = f.readlines() f.close() if len(lines) > 0: release =", "os.sep + \"config\", outputDirName + \"config-tmp\") except IOError as e: try: os.remove(outputDirName +", "of conditions and the following disclaimer. # * Redistributions in binary form must", "the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "release + \" (Release date: \" + date + \")\") sys.exit(0) elif o", "a elif o in (\"-c\", \"--continue\"): testMode = True if len(args): inputFileName =", "help(isError) def main(): if not importsOk(): exit(1) # import pathname where seal package", "not plyModuleOK: print (\"\\tPLY module not found\") installStr += \" python-ply\" print (installStr)", "conditions and the following disclaimer in the # documentation and/or other materials provided", "module not found\") installStr += \" python-ply\" print (installStr) return False return True", "inputFileName = 'test.sl' outputFileName = 'main.c' architecture = 'testarch' #architecture = 'msp430' #architecture", "sys.stderr.write(\" -p, --path <path> Path to the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\"", "o in (\"-o\", \"--output\"): outputFileName = a elif o in (\"-p\", \"--path\"): pathToOS", "where this path is inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..'", "generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with", "generate C 
code to an output file g = generator.createGenerator(targetOS) if g is", "{0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to the target OS installation (default: {0})\\n\".format(pathToOS))", "\"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err:", "is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components'))", "-t <target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to", "ex: print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName +", "not importsOk(): exit(1) # import pathname where seal package is located selfDirname =", "# cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"):", "case this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(),", "os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName, 'r') as inputFile: contents = inputFile.read()", "elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return 0 if", "outputFileName = 'main.c' architecture = 'testarch' #architecture = 'msp430' #architecture = 'pc' targetOS", "LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special character, creates problems in 
makefile", "else: installStr = \"Make sure you have installed modules:\" print (\"Cannot run SEAL", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF", "-v, --version Print version and exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors (test", "global outputFileName global architecture global verboseMode global testMode global pathToOS try: opts, args", "exit: print (str(err)) # will print something like \"option -a not recognized\" help(True)", "SEAL application's config file as the basis try: shutil.copyfile(outputDirName + \"..\" + os.sep", "notice, this list of conditions and the following disclaimer in the # documentation", "read file {0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL code) parser = generator.SealParser(architecture,", "open(inputFileName, 'r') as inputFile: contents = inputFile.read() if contents == None: sys.stderr.write('Failed to", "not recognized\" help(True) isError = False showHelp = False for o, a in", "if parser.isError: exitProgram(1) # do not generate output file in this case #", "'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal import generator # in case", "in (\"-t\", \"--target\"): targetOS = a.lower() elif o in (\"-v\", \"--version\"): versionFile =", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "\"r\") lines = f.readlines() f.close() if len(lines) > 0: release = lines[0].strip() if", "os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS)", "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS", "rebuiding time. 
try: isSame = (os.system(\"cmp -s \" + outputDirName + \"config-tmp \"", "False def exitProgram(code): if not testMode: exit(code) print (\"Would exit from program with", "print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\")", "this list of conditions and the following disclaimer. # * Redistributions in binary", "AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR", "'test.sl' outputFileName = 'main.c' architecture = 'testarch' #architecture = 'msp430' #architecture = 'pc'", "\"--arch\"): architecture = a.lower() if o in (\"-t\", \"--target\"): targetOS = a.lower() elif", "and exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors (test mode)\\n\") sys.stderr.write(\" -h, --help", "the following conditions are met: # * Redistributions of source code must retain", "[\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err: #", "help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>,", "file g = generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to find code generator", "(\"\\tPLY module not found\") installStr += \" python-ply\" print (installStr) return False return", "with or without # modification, are permitted provided that the following conditions are", "INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #", "print (\"MansOS version: \" + release + \" (Release date: \" + date", "def main(): if not importsOk(): exit(1) # import pathname where seal package is", "is special character, creates problems in makefile where this path is inserted else:", "\"config\") return -1 if g.isComponentUsed(\"network\"): 
g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'),", "conditions and the following disclaimer. # * Redistributions in binary form must reproduce", "\"--output\"): outputFileName = a elif o in (\"-p\", \"--path\"): pathToOS = a elif", "+ \"config-tmp\") except OSError as e: pass with open(outputDirName + \"config-tmp\", 'a+') as", "print (\"\\tPLY module not found\") installStr += \" python-ply\" print (installStr) return False", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER", "2012 <NAME> # # Redistribution and use in source and binary forms, with", "--path <path> Path to the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose", "IOError as e: try: os.remove(outputDirName + \"config-tmp\") except OSError as e: pass with", "OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER", "def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt:", "problems in makefile where this path is inserted else: makefilePathToOS = os.path.normpath(dirname +", "= 'main.c' architecture = 'testarch' #architecture = 'msp430' #architecture = 'pc' targetOS =", "parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName,", "shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\", outputDirName + \"config-tmp\") except IOError as", "= '../..' verboseMode = False testMode = False def exitProgram(code): if not testMode:", "Copyright (c) 2012 <NAME> # # Redistribution and use in source and binary", "following disclaimer. 
# * Redistributions in binary form must reproduce the above copyright", "= \"Make sure you have installed modules:\" print (\"Cannot run SEAL parser:\") if", "= args[0] args = args[1:] if len(args): sys.stderr.write(\"Too many arguments given. ({0} remaining", "+ os.sep + pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName +", "provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "importsOk(): plyModuleOK = True # Python Lex Yacc - for compilation try: import", "#!/usr/bin/python # (because /usr/bin/env python does not work when called from IDE on", "OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", "OF THE POSSIBILITY OF SUCH DAMAGE. import os, sys, getopt, shutil inputFileName =", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED", "#architecture = 'pc' targetOS = 'mansos' pathToOS = '../..' verboseMode = False testMode", "compilation try: import ply except ImportError: plyModuleOK = False if not plyModuleOK: if", "global verboseMode global testMode global pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\",", "g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use", "= args[1:] if len(args): sys.stderr.write(\"Too many arguments given. ({0} remaining not parsed)\\n\".format(args)) isError", "+ \"config\") == 0) except: isSame = False if not isSame: try: shutil.move(outputDirName", "outputDirName + \"config\") except Exception as ex: print (ex) if generator.components.componentRegister.isError: # cleanup", "len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO", "sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\"", "if os.name == 'posix': installStr = \"Make sure you have installed required modules.", "NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "parsed)\\n\".format(args)) isError = True if showHelp or isError: help(isError) def main(): if not", "file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to the", "CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL", "list of conditions and the following disclaimer. # * Redistributions in binary form", "does not work when called from IDE on Windows) # # Copyright (c)", "(SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do", "in the # documentation and/or other materials provided with the distribution. 
# THIS", "str(code)) raise Exception def importsOk(): plyModuleOK = True # Python Lex Yacc -", "help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName global architecture global verboseMode global", "testMode global pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\",", "\" + date + \")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode =", "contents == None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) # parse input file", "release = lines[0].strip() if len(lines) > 1: date = lines[1].strip() except: pass print", "Redistributions in binary form must reproduce the above copyright # notice, this list", "elif o in (\"-o\", \"--output\"): outputFileName = a elif o in (\"-p\", \"--path\"):", "'msp430' #architecture = 'pc' targetOS = 'mansos' pathToOS = '../..' verboseMode = False", "sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR", "printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture))", "sys.stderr.write(\" -v, --version Print version and exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors", "if different: saves rebuiding time. 
try: isSame = (os.system(\"cmp -s \" + outputDirName", "1: date = lines[1].strip() except: pass print (\"MansOS version: \" + release +", "'-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if not", "plyModuleOK: if os.name == 'posix': installStr = \"Make sure you have installed required", "(INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "for compilation try: import ply except ImportError: plyModuleOK = False if not plyModuleOK:", "date + \")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode = True elif", "# for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName, 'r')", "sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName, 'r') as inputFile: contents =", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "of source code must retain the above copyright notice, # this list of", "global pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\",", "True if showHelp or isError: help(isError) def main(): if not importsOk(): exit(1) #", "len(args): sys.stderr.write(\"Too many arguments given. 
({0} remaining not parsed)\\n\".format(args)) isError = True if", "-s \" + outputDirName + \"config-tmp \" + outputDirName + \"config\") == 0)", "outputDirName + \"config\") == 0) except: isSame = False if not isSame: try:", "Continue on errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError)) def", "import ply except ImportError: plyModuleOK = False if not plyModuleOK: if os.name ==", "pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\",", "global architecture global verboseMode global testMode global pathToOS try: opts, args = getopt.getopt(sys.argv[1:],", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file", "with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as outputFile:", "ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR #", "+ \"config-tmp \" + outputDirName + \"config\") == 0) except: isSame = False", "= 'msp430' #architecture = 'pc' targetOS = 'mansos' pathToOS = '../..' 
verboseMode =", "Redistributions of source code must retain the above copyright notice, # this list", "open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's", "if outputFileName == '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName +=", "= True # Python Lex Yacc - for compilation try: import ply except", "found\") installStr += \" python-ply\" print (installStr) return False return True def printLine(line):", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; #", "code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do not", "* Redistributions in binary form must reproduce the above copyright # notice, this", "= lines[0].strip() if len(lines) > 1: date = lines[1].strip() except: pass print (\"MansOS", "or without # modification, are permitted provided that the following conditions are met:", "in makefile where this path is inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep", "args[1:] if len(args): sys.stderr.write(\"Too many arguments given. 
({0} remaining not parsed)\\n\".format(args)) isError =", "= getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError", "# Copyright (c) 2012 <NAME> # # Redistribution and use in source and", "args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except", "os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal import generator", "inputFile.read() if contents == None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) # parse", "(ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return", "OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY", "try: import ply except ImportError: plyModuleOK = False if not plyModuleOK: if os.name", "conditions are met: # * Redistributions of source code must retain the above", "mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global", "'components')) from seal import generator # in case this is used multiple times", "(\"Cannot run SEAL parser:\") if not plyModuleOK: print (\"\\tPLY module not found\") installStr", "time. 
try: isSame = (os.system(\"cmp -s \" + outputDirName + \"config-tmp \" +", "to-be-parsed with open(inputFileName, 'r') as inputFile: contents = inputFile.read() if contents == None:", "C code to an output file g = generator.createGenerator(targetOS) if g is None:", "def importsOk(): plyModuleOK = True # Python Lex Yacc - for compilation try:", "\"--help\"): showHelp = True elif o in (\"-o\", \"--output\"): outputFileName = a elif", "# * Redistributions in binary form must reproduce the above copyright # notice,", "# notice, this list of conditions and the following disclaimer in the #", "except getopt.GetoptError as err: # print help information and exit: print (str(err)) #", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN", "# modification, are permitted provided that the following conditions are met: # *", "g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py'))", "to file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to", "pathToOS = a elif o in (\"-c\", \"--continue\"): testMode = True if len(args):", "try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\",", "as outputFile: g.generateConfigFile(outputFile) # replace the config file only if different: saves rebuiding", "sys.stderr.write(\" -t <target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output", "--help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName global architecture", "LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED 
AND", "(\"-h\", \"--help\"): showHelp = True elif o in (\"-o\", \"--output\"): outputFileName = a", "open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile,", "'-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to the target", "os.remove(outputDirName + \"config-tmp\") except OSError as e: pass with open(outputDirName + \"config-tmp\", 'a+')", "= True elif o in (\"-o\", \"--output\"): outputFileName = a elif o in", "LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #", "'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS)", "importsOk(): exit(1) # import pathname where seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__))", "THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "o in (\"-V\", \"--verbose\"): verboseMode = True elif o in (\"-h\", \"--help\"): showHelp", "Exception def importsOk(): plyModuleOK = True # Python Lex Yacc - for compilation", "if showHelp or isError: help(isError) def main(): if not importsOk(): exit(1) # import", "generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return -1 if", "sure you have installed modules:\" print (\"Cannot run SEAL parser:\") if not plyModuleOK:", "= a.lower() if o in (\"-t\", \"--target\"): targetOS = a.lower() elif o in", "generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do not generate output file", "target OS {0}'.format(targetOS)) exitProgram(1) if 
outputFileName == '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName)", "len(lines) > 1: date = lines[1].strip() except: pass print (\"MansOS version: \" +", "\" python-ply\" print (installStr) return False return True def printLine(line): sys.stderr.write(line) def help(isError):", "os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS =", "os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS)", "Print version and exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors (test mode)\\n\") sys.stderr.write(\"", "# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR", "+ \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName,", "OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS", "--verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version and exit\\n\") sys.stderr.write(\" -c, --continue", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN", "DAMAGE. 
import os, sys, getopt, shutil inputFileName = 'test.sl' outputFileName = 'main.c' architecture", "raise Exception def importsOk(): plyModuleOK = True # Python Lex Yacc - for", "and use in source and binary forms, with or without # modification, are", "(default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to file, '-' for stdout (default:", "import pathname where seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools'))", "= len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); #", "err: # print help information and exit: print (str(err)) # will print something", "> 1: date = lines[1].strip() except: pass print (\"MansOS version: \" + release", "release = \"Unknown\" date = \"Unknown\" try: f = open(versionFile, \"r\") lines =", "None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL code)", "OSError as e: pass with open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) #", "try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except Exception as ex: print (ex)", "inputFileName = args[0] args = args[1:] if len(args): sys.stderr.write(\"Too many arguments given. ({0}", "g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName,", "= os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date = \"Unknown\" try: f = open(versionFile,", "other materials provided with the distribution. 
# THIS SOFTWARE IS PROVIDED BY THE", "stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to the target OS installation", "try: f = open(versionFile, \"r\") lines = f.readlines() f.close() if len(lines) > 0:", "times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed", "in (\"-a\", \"--arch\"): architecture = a.lower() if o in (\"-t\", \"--target\"): targetOS =", "this list of conditions and the following disclaimer in the # documentation and/or", "BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "as err: # print help information and exit: print (str(err)) # will print", "# * Redistributions of source code must retain the above copyright notice, #", "os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep))", "except OSError as e: pass with open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile)", "'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's config file as", "USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName global architecture global", "code generator for target OS {0}'.format(targetOS)) exitProgram(1) if outputFileName == '-': g.generate(sys.stdout) else:", "date: \" + date + \")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode", "outputFileName == '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep", "o in (\"-p\", \"--path\"): pathToOS = a elif o in (\"-c\", \"--continue\"): 
testMode", "= a elif o in (\"-p\", \"--path\"): pathToOS = a elif o in", "generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to find code generator for target OS", "testMode = False def exitProgram(code): if not testMode: exit(code) print (\"Would exit from", "except Exception as ex: print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName +", "do not generate output file in this case # generate C code to", "+ \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's config", "= True if showHelp or isError: help(isError) def main(): if not importsOk(): exit(1)", "else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName)", "disclaimer in the # documentation and/or other materials provided with the distribution. #", "where seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS,", "date = lines[1].strip() except: pass print (\"MansOS version: \" + release + \"", "plyModuleOK: print (\"\\tPLY module not found\") installStr += \" python-ply\" print (installStr) return", "OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "is inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..' * numDirs) +", "if len(lines) > 0: release = lines[0].strip() if len(lines) > 1: date =", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT", "OS {0}'.format(targetOS)) exitProgram(1) if outputFileName == '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if", "verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do not generate output file in this", "help information and exit: print (str(err)) # will print something like \"option -a", "generator for target OS {0}'.format(targetOS)) exitProgram(1) if outputFileName == '-': g.generate(sys.stdout) else: outputDirName", "parser:\") if not plyModuleOK: print (\"\\tPLY module not found\") installStr += \" python-ply\"", "+ \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace the config file only if", "special character, creates problems in makefile where this path is inserted else: makefilePathToOS", "== 'posix': installStr = \"Make sure you have installed required modules. Run:\\n\\tsudo apt-get", "Redistribution and use in source and binary forms, with or without # modification,", "extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName, 'r') as inputFile:", "errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global", "source and binary forms, with or without # modification, are permitted provided that", "BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "True elif o in (\"-h\", \"--help\"): showHelp = True elif o in (\"-o\",", "False if not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except Exception", "= False testMode = False def exitProgram(code): if not testMode: exit(code) print (\"Would", "isSame = False if not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\")", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE", "'r') as inputFile: contents = 
inputFile.read() if contents == None: sys.stderr.write('Failed to read", "not found\") installStr += \" python-ply\" print (installStr) return False return True def", "above copyright # notice, this list of conditions and the following disclaimer in", "parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do not generate", "FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "isError = True if showHelp or isError: help(isError) def main(): if not importsOk():", "'tools', 'seal', 'components')) from seal import generator # in case this is used", "* numDirs) + os.sep + pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with", "# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,", "testMode: exit(code) print (\"Would exit from program with code \" + str(code)) raise", "versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date = \"Unknown\" try: f =", "= a.lower() elif o in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release =", "elif o in (\"-h\", \"--help\"): showHelp = True elif o in (\"-o\", \"--output\"):", "binary form must reproduce the above copyright # notice, this list of conditions", "exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print", "distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "sys.stderr.write('Failed to find code generator for target OS {0}'.format(targetOS)) exitProgram(1) if outputFileName ==", "form must reproduce the above copyright # notice, this list of conditions and", "numDirs) + os.sep + pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName", "# print help information and exit: print (str(err)) # will print something like", "you have installed required modules. 
Run:\\n\\tsudo apt-get install\" else: installStr = \"Make sure", "called from IDE on Windows) # # Copyright (c) 2012 <NAME> # #", "len(args): inputFileName = args[0] args = args[1:] if len(args): sys.stderr.write(\"Too many arguments given.", "# (because /usr/bin/env python does not work when called from IDE on Windows)", "(\"-p\", \"--path\"): pathToOS = a elif o in (\"-c\", \"--continue\"): testMode = True", "if not testMode: exit(code) print (\"Would exit from program with code \" +", "provided that the following conditions are met: # * Redistributions of source code", "args = args[1:] if len(args): sys.stderr.write(\"Too many arguments given. ({0} remaining not parsed)\\n\".format(args))", "f = open(versionFile, \"r\") lines = f.readlines() f.close() if len(lines) > 0: release", "in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date = \"Unknown\"", "except IOError as e: try: os.remove(outputDirName + \"config-tmp\") except OSError as e: pass", "exit(1) # import pathname where seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname,", "None: sys.stderr.write('Failed to find code generator for target OS {0}'.format(targetOS)) exitProgram(1) if outputFileName", "False testMode = False def exitProgram(code): if not testMode: exit(code) print (\"Would exit", "# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE)", "print (str(err)) # will print something like \"option -a not recognized\" help(True) isError", "application's config file as the basis try: shutil.copyfile(outputDirName + \"..\" + os.sep +", "(Release date: \" + date + \")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"):", "= True if len(args): inputFileName = args[0] args = args[1:] if len(args): sys.stderr.write(\"Too", "{0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version 
and exit\\n\")", "are met: # * Redistributions of source code must retain the above copyright", "'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return 0 if __name__ == '__main__':", "False if not plyModuleOK: if os.name == 'posix': installStr = \"Make sure you", "(INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "makefilePathToOS) # use SEAL application's config file as the basis try: shutil.copyfile(outputDirName +", "= True elif o in (\"-h\", \"--help\"): showHelp = True elif o in", "getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as", "saves rebuiding time. try: isSame = (os.system(\"cmp -s \" + outputDirName + \"config-tmp", "USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH", "os.sep + ('/..' * numDirs) + os.sep + pathToOS) with open(outputFileName, 'w') as", "the above copyright # notice, this list of conditions and the following disclaimer", "else: makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..' * numDirs) + os.sep +", "('/..' * numDirs) + os.sep + pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile)", "if len(args): sys.stderr.write(\"Too many arguments given. 
({0} remaining not parsed)\\n\".format(args)) isError = True", "g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName):", "== None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL", "is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) #", "True # Python Lex Yacc - for compilation try: import ply except ImportError:", "# in case this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension", "cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName,", "import os, sys, getopt, shutil inputFileName = 'test.sl' outputFileName = 'main.c' architecture =", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "getopt, shutil inputFileName = 'test.sl' outputFileName = 'main.c' architecture = 'testarch' #architecture =", "in (\"-h\", \"--help\"): showHelp = True elif o in (\"-o\", \"--output\"): outputFileName =", "sys, getopt, shutil inputFileName = 'test.sl' outputFileName = 'main.c' architecture = 'testarch' #architecture", "except: isSame = False if not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName +", "version: \" + release + \" (Release date: \" + date + \")\")", "information and exit: print (str(err)) # will print something like \"option -a not", "g.generateConfigFile(outputFile) # replace the config file only if different: saves rebuiding time. 
try:", "= \"Unknown\" try: f = open(versionFile, \"r\") lines = f.readlines() f.close() if len(lines)", "run SEAL parser:\") if not plyModuleOK: print (\"\\tPLY module not found\") installStr +=", "an output file g = generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to find", "+ pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w')", "\"..\" + os.sep + \"config\", outputDirName + \"config-tmp\") except IOError as e: try:", "and the following disclaimer in the # documentation and/or other materials provided with", "OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE", "modules. Run:\\n\\tsudo apt-get install\" else: installStr = \"Make sure you have installed modules:\"", "SUCH DAMAGE. import os, sys, getopt, shutil inputFileName = 'test.sl' outputFileName = 'main.c'", "mode\\n\") sys.stderr.write(\" -v, --version Print version and exit\\n\") sys.stderr.write(\" -c, --continue Continue on", "package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal',", "len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\", "-a not recognized\" help(True) isError = False showHelp = False for o, a", "file as the basis try: shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\", outputDirName", "= 'testarch' #architecture = 'msp430' #architecture = 'pc' targetOS = 'mansos' pathToOS =", "False return True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch", "forms, with or without # modification, are permitted provided that the following conditions", "Yacc - for compilation try: import ply except ImportError: 
plyModuleOK = False if", "= False for o, a in opts: if o in (\"-a\", \"--arch\"): architecture", "\"verbose\", \"version\"]) except getopt.GetoptError as err: # print help information and exit: print", "+ date + \")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode = True", "pass with open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace the config", "f.close() if len(lines) > 0: release = lines[0].strip() if len(lines) > 1: date", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY", "({0} remaining not parsed)\\n\".format(args)) isError = True if showHelp or isError: help(isError) def", "+ release + \" (Release date: \" + date + \")\") sys.exit(0) elif", "= 'test.sl' outputFileName = 'main.c' architecture = 'testarch' #architecture = 'msp430' #architecture =", "\")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode = True elif o in", "= \"Unknown\" date = \"Unknown\" try: f = open(versionFile, \"r\") lines = f.readlines()", "a in opts: if o in (\"-a\", \"--arch\"): architecture = a.lower() if o", "numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\');", "in source and binary forms, with or without # modification, are permitted provided", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT", "install\" else: installStr = \"Make sure you have installed modules:\" print (\"Cannot run", "'w') as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName,", "installed required modules. 
Run:\\n\\tsudo apt-get install\" else: installStr = \"Make sure you have", "+ \"Makefile\") os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName,", "g is None: sys.stderr.write('Failed to find code generator for target OS {0}'.format(targetOS)) exitProgram(1)", "# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, #", "the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v,", "# # Redistribution and use in source and binary forms, with or without", "exitProgram(code): if not testMode: exit(code) print (\"Would exit from program with code \"", "notice, # this list of conditions and the following disclaimer. # * Redistributions", "installStr += \" python-ply\" print (installStr) return False return True def printLine(line): sys.stderr.write(line)", "in (\"-p\", \"--path\"): pathToOS = a elif o in (\"-c\", \"--continue\"): testMode =", "this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName)))", "output file in this case # generate C code to an output file", "different: saves rebuiding time. 
try: isSame = (os.system(\"cmp -s \" + outputDirName +", "\" (Release date: \" + date + \")\") sys.exit(0) elif o in (\"-V\",", "a elif o in (\"-p\", \"--path\"): pathToOS = a elif o in (\"-c\",", "{0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to file, '-' for stdout (default: {0})\\n\".format(outputFileName))", "isError = False showHelp = False for o, a in opts: if o", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT,", "seal import generator # in case this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv)", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR", "selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal", "printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do not generate output file in", "try: shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\", outputDirName + \"config-tmp\") except IOError", "NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "sys.stderr.write(\"Too many arguments given. 
({0} remaining not parsed)\\n\".format(args)) isError = True if showHelp", "read file to-be-parsed with open(inputFileName, 'r') as inputFile: contents = inputFile.read() if contents", "= False if not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "sys.stderr.write(\" -o, --output <file> Output to file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\"", "outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) #", "sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version and exit\\n\") sys.stderr.write(\"", "+ \" (Release date: \" + date + \")\") sys.exit(0) elif o in", "not generate output file in this case # generate C code to an", "this path is inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..' 
*", "(defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY", "IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN", "(\"MansOS version: \" + release + \" (Release date: \" + date +", "as ex: print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED.", "OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "+ \"config-tmp\", outputDirName + \"config\") except Exception as ex: print (ex) if generator.components.componentRegister.isError:", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR", "apt-get install\" else: installStr = \"Make sure you have installed modules:\" print (\"Cannot", "+ \")\") sys.exit(0) elif o in (\"-V\", \"--verbose\"): verboseMode = True elif o", "from seal import generator # in case this is used multiple times generator.components.clearGlobals()", "# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os, sys, getopt, shutil", "\"--verbose\"): verboseMode = True elif o in (\"-h\", \"--help\"): showHelp = True elif", "file to-be-parsed with open(inputFileName, 'r') as inputFile: contents = inputFile.read() if contents ==", "\"config-tmp\", outputDirName + \"config\") except Exception as ex: print (ex) if generator.components.componentRegister.isError: #", "installStr = \"Make sure you have installed modules:\" print (\"Cannot run SEAL parser:\")", "work when called from IDE on Windows) # # Copyright (c) 2012 <NAME>", "def exitProgram(code): if not testMode: exit(code) print (\"Would exit from program with code", "inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..' * numDirs) + os.sep", "exit(code) print (\"Would exit from program with code \" + str(code)) raise Exception", "this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName global architecture global verboseMode", "in (\"-V\", \"--verbose\"): verboseMode = True elif o in (\"-h\", \"--help\"): showHelp =", "== '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if", "\"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err: # print help", "outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname", "= pathToOS.strip('\\\\'); # \\ is special character, creates problems in makefile where this", "if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\") os.remove(outputDirName + \"config\") return -1", "g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): 
g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"):", "version and exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors (test mode)\\n\") sys.stderr.write(\" -h,", "use in source and binary forms, with or without # modification, are permitted", "if not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except Exception as", "in binary form must reproduce the above copyright # notice, this list of", "False for o, a in opts: if o in (\"-a\", \"--arch\"): architecture =", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", "1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special", "only if different: saves rebuiding time. try: isSame = (os.system(\"cmp -s \" +", "lines[1].strip() except: pass print (\"MansOS version: \" + release + \" (Release date:", "ImportError: plyModuleOK = False if not plyModuleOK: if os.name == 'posix': installStr =", "architecture = a.lower() if o in (\"-t\", \"--target\"): targetOS = a.lower() elif o", "args[0] args = args[1:] if len(args): sys.stderr.write(\"Too many arguments given. ({0} remaining not", "\" + outputDirName + \"config\") == 0) except: isSame = False if not", "== 0) except: isSame = False if not isSame: try: shutil.move(outputDirName + \"config-tmp\",", "program with code \" + str(code)) raise Exception def importsOk(): plyModuleOK = True", "= f.readlines() f.close() if len(lines) > 0: release = lines[0].strip() if len(lines) >", "# will print something like \"option -a not recognized\" help(True) isError = False", "THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import", "sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal import generator #", "-1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif", "you have installed modules:\" print (\"Cannot run SEAL parser:\") if not plyModuleOK: print", "use SEAL application's config file as the basis try: shutil.copyfile(outputDirName + \"..\" +", "to find code generator for target OS {0}'.format(targetOS)) exitProgram(1) if outputFileName == '-':", "- 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is", "(\"-a\", \"--arch\"): architecture = a.lower() if o in (\"-t\", \"--target\"): targetOS = a.lower()", "\"version\"]) except getopt.GetoptError as err: # print help information and exit: print (str(err))", "exitProgram(1) # do not generate output file in this case # generate C", "PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o,", "when called from IDE on Windows) # # Copyright (c) 2012 <NAME> #", "print help information and exit: print (str(err)) # will print something like \"option", "# this list of conditions and the following disclaimer. 
# * Redistributions in", "Lex Yacc - for compilation try: import ply except ImportError: plyModuleOK = False", "'a+') as outputFile: g.generateConfigFile(outputFile) # replace the config file only if different: saves", "\"config-tmp\") except OSError as e: pass with open(outputDirName + \"config-tmp\", 'a+') as outputFile:", "replace the config file only if different: saves rebuiding time. try: isSame =", "\"config\") except Exception as ex: print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName", "if contents == None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) # parse input", "pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as", "\"config\") == 0) except: isSame = False if not isSame: try: shutil.move(outputDirName +", "testMode = True if len(args): inputFileName = args[0] args = args[1:] if len(args):", "opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"])", "= os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal import", "Run:\\n\\tsudo apt-get install\" else: installStr = \"Make sure you have installed modules:\" print", "makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if", "# documentation and/or other materials provided with the distribution. 
# THIS SOFTWARE IS", "except ImportError: plyModuleOK = False if not plyModuleOK: if os.name == 'posix': installStr", "recognized\" help(True) isError = False showHelp = False for o, a in opts:", "global inputFileName global outputFileName global architecture global verboseMode global testMode global pathToOS try:", "as inputFile: contents = inputFile.read() if contents == None: sys.stderr.write('Failed to read file", "DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "= \"Make sure you have installed required modules. Run:\\n\\tsudo apt-get install\" else: installStr", "elif o in (\"-p\", \"--path\"): pathToOS = a elif o in (\"-c\", \"--continue\"):", "CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #", "= inputFile.read() if contents == None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) #", "if not plyModuleOK: print (\"\\tPLY module not found\") installStr += \" python-ply\" print", "sure you have installed required modules. 
Run:\\n\\tsudo apt-get install\" else: installStr = \"Make", "len(lines) > 0: release = lines[0].strip() if len(lines) > 1: date = lines[1].strip()", "(test mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName", "-V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version and exit\\n\") sys.stderr.write(\" -c,", "e: try: os.remove(outputDirName + \"config-tmp\") except OSError as e: pass with open(outputDirName +", "modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName, 'r') as inputFile: contents", "# do not generate output file in this case # generate C code", "# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;", "pathname where seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname,", "+ outputDirName + \"config\") == 0) except: isSame = False if not isSame:", "+ \"config\") except Exception as ex: print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName)", "<NAME> # # Redistribution and use in source and binary forms, with or", "= os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special character, creates", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS", "if not importsOk(): exit(1) # import pathname where seal package is located selfDirname", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "pass print (\"MansOS version: \" + release + \" (Release date: \" +", "return False return True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>,", "Python Lex Yacc - for 
compilation try: import ply except ImportError: plyModuleOK =", "and the following disclaimer. # * Redistributions in binary form must reproduce the", "'main.c' architecture = 'testarch' #architecture = 'msp430' #architecture = 'pc' targetOS = 'mansos'", "A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER", "{0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode)", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE", "to the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\"", "g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return 0 if __name__ ==", "- for compilation try: import ply except ImportError: plyModuleOK = False if not", "in opts: if o in (\"-a\", \"--arch\"): architecture = a.lower() if o in", "verboseMode = True elif o in (\"-h\", \"--help\"): showHelp = True elif o", "= False if not plyModuleOK: if os.name == 'posix': installStr = \"Make sure", "elif o in (\"-c\", \"--continue\"): testMode = True if len(args): inputFileName = args[0]", "makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return 0", "= False showHelp = False for o, a in opts: if o in", "parser.run(contents) if parser.isError: exitProgram(1) # do not generate output file in this case", "e: pass with open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace the", "OF SUCH DAMAGE. 
import os, sys, getopt, shutil inputFileName = 'test.sl' outputFileName =", "+ os.sep + \"config\", outputDirName + \"config-tmp\") except IOError as e: try: os.remove(outputDirName", "outputDirName + \"config-tmp \" + outputDirName + \"config\") == 0) except: isSame =", "(\"-V\", \"--verbose\"): verboseMode = True elif o in (\"-h\", \"--help\"): showHelp = True", "must retain the above copyright notice, # this list of conditions and the", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "with code \" + str(code)) raise Exception def importsOk(): plyModuleOK = True #", "= generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to find code generator for target", "many arguments given. ({0} remaining not parsed)\\n\".format(args)) isError = True if showHelp or", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "<arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS (default:", "{0}'.format(targetOS)) exitProgram(1) if outputFileName == '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName):", "as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's config file as the", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os, sys, getopt, shutil inputFileName", "sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target", "<file> Output to file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path>", "print (installStr) return False return True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\"", "> 0: release = lines[0].strip() if len(lines) > 1: date = lines[1].strip() except:", "(default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to the target OS installation (default:", "architecture global verboseMode global testMode global pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\",", "+ ('/..' * numDirs) + os.sep + pathToOS) with open(outputFileName, 'w') as outputFile:", "Path to the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\")", "Exception as ex: print (ex) if generator.components.componentRegister.isError: # cleanup os.remove(outputFileName) os.remove(outputDirName + \"Makefile\")", "AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL", "+= \" python-ply\" print (installStr) return False return True def printLine(line): sys.stderr.write(line) def", "reproduce the above copyright # notice, this list of conditions and the following", "(os.system(\"cmp -s \" + outputDirName + \"config-tmp \" + outputDirName + \"config\") ==", "except: pass print (\"MansOS version: \" + release + \" (Release date: \"", "# generate C code to an output file g = generator.createGenerator(targetOS) if g", "-h, --help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName global", "the above copyright notice, # this list of conditions and the following disclaimer.", "main(): if not importsOk(): exit(1) # import pathname where seal package is located", "to an output file g = generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to", "g = generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to find code generator for", "--arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS (default: {0})\\n\".format(targetOS))", "0: release = lines[0].strip() if len(lines) > 1: date = lines[1].strip() except: pass", "outputFileName = a elif o in (\"-p\", \"--path\"): pathToOS = a elif o", "\"Makefile\") os.remove(outputDirName + \"config\") return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'),", "and binary forms, with or without # modification, are permitted provided that the", "# import pathname where seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS,", "def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t", "makefilePathToOS = 
os.path.normpath(dirname + os.sep + ('/..' * numDirs) + os.sep + pathToOS)", "open(versionFile, \"r\") lines = f.readlines() f.close() if len(lines) > 0: release = lines[0].strip()", "python-ply\" print (installStr) return False return True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\")", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "architecture = 'testarch' #architecture = 'msp430' #architecture = 'pc' targetOS = 'mansos' pathToOS", "modification, are permitted provided that the following conditions are met: # * Redistributions", "(str(err)) # will print something like \"option -a not recognized\" help(True) isError =", "(c) 2012 <NAME> # # Redistribution and use in source and binary forms,", "not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS):", "(\"-t\", \"--target\"): targetOS = a.lower() elif o in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\",", "pathToOS, 'tools', 'seal', 'components')) from seal import generator # in case this is", "g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return 0 if __name__", "parser.isError: exitProgram(1) # do not generate output file in this case # generate", "= os.path.normpath(dirname + os.sep + ('/..' 
* numDirs) + os.sep + pathToOS) with", "OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "from program with code \" + str(code)) raise Exception def importsOk(): plyModuleOK =", "following disclaimer in the # documentation and/or other materials provided with the distribution.", "not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except Exception as ex:", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED", "g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'),", "parseCommandLine(argv): global inputFileName global outputFileName global architecture global verboseMode global testMode global pathToOS", "target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version", "o, a in opts: if o in (\"-a\", \"--arch\"): architecture = a.lower() if", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND", "installStr = \"Make sure you have installed required modules. 
Run:\\n\\tsudo apt-get install\" else:", "Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to file, '-' for", "(\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date = \"Unknown\" try:", "in case this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules", "# # Copyright (c) 2012 <NAME> # # Redistribution and use in source", "not testMode: exit(code) print (\"Would exit from program with code \" + str(code))", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "'mansos' pathToOS = '../..' verboseMode = False testMode = False def exitProgram(code): if", "will print something like \"option -a not recognized\" help(True) isError = False showHelp", "elif o in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date", "basis try: shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\", outputDirName + \"config-tmp\") except", "as e: try: os.remove(outputDirName + \"config-tmp\") except OSError as e: pass with open(outputDirName", "IDE on Windows) # # Copyright (c) 2012 <NAME> # # Redistribution and", "\"Make sure you have installed modules:\" print (\"Cannot run SEAL parser:\") if not", "makefile where this path is inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep +", "located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from", "open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace the config file only", "\" + str(code)) raise Exception def importsOk(): plyModuleOK = True # Python Lex", 
"'posix': installStr = \"Make sure you have installed required modules. Run:\\n\\tsudo apt-get install\"", "'pc' targetOS = 'mansos' pathToOS = '../..' verboseMode = False testMode = False", "of conditions and the following disclaimer in the # documentation and/or other materials", "OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to file, '-' for stdout", "file in this case # generate C code to an output file g", "-c, --continue Continue on errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\")", "not parsed)\\n\".format(args)) isError = True if showHelp or isError: help(isError) def main(): if", "\"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err: # print help information and", "= os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs =", "# use SEAL application's config file as the basis try: shutil.copyfile(outputDirName + \"..\"", "date = \"Unknown\" try: f = open(versionFile, \"r\") lines = f.readlines() f.close() if", "= 'mansos' pathToOS = '../..' verboseMode = False testMode = False def exitProgram(code):", "pathToOS = '../..' 
verboseMode = False testMode = False def exitProgram(code): if not", "try: os.remove(outputDirName + \"config-tmp\") except OSError as e: pass with open(outputDirName + \"config-tmp\",", "if o in (\"-t\", \"--target\"): targetOS = a.lower() elif o in (\"-v\", \"--version\"):", "verboseMode = False testMode = False def exitProgram(code): if not testMode: exit(code) print", "parse input file (SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError:", "ply except ImportError: plyModuleOK = False if not plyModuleOK: if os.name == 'posix':", "generator # in case this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for", "= generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) # do not generate output", "ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname,", "if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"):", "+ os.sep + ('/..' 
* numDirs) + os.sep + pathToOS) with open(outputFileName, 'w')", "for o, a in opts: if o in (\"-a\", \"--arch\"): architecture = a.lower()", "+ outputDirName + \"config-tmp \" + outputDirName + \"config\") == 0) except: isSame", "-a <arch>, --arch Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS", "makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special character, creates problems in makefile where", "input file (SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1)", "-p, --path <path> Path to the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V,", "sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName global architecture global verboseMode global testMode", "os.name == 'posix': installStr = \"Make sure you have installed required modules. Run:\\n\\tsudo", "showHelp = False for o, a in opts: if o in (\"-a\", \"--arch\"):", "lines[0].strip() if len(lines) > 1: date = lines[1].strip() except: pass print (\"MansOS version:", "for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read file to-be-parsed with open(inputFileName, 'r') as", "like \"option -a not recognized\" help(True) isError = False showHelp = False for", "sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL code) parser", "os, sys, getopt, shutil inputFileName = 'test.sl' outputFileName = 'main.c' architecture = 'testarch'", "CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY,", "in this case # generate C code to an output file g =", "if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special character, creates problems in", "0) except: isSame = False if not isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName", 
"POSSIBILITY OF SUCH DAMAGE. import os, sys, getopt, shutil inputFileName = 'test.sl' outputFileName", "# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR", "\"continue\", \"help\", \"output=\", \"path=\", \"target=\", \"verbose\", \"version\"]) except getopt.GetoptError as err: # print", "code must retain the above copyright notice, # this list of conditions and", "code \" + str(code)) raise Exception def importsOk(): plyModuleOK = True # Python", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN", "\"Unknown\" try: f = open(versionFile, \"r\") lines = f.readlines() f.close() if len(lines) >", "os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1 dirname = os.path.dirname(os.path.realpath(outputFileName))", "/usr/bin/env python does not work when called from IDE on Windows) # #", "THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #", "<path> Path to the target OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose", "with open(inputFileName, 'r') as inputFile: contents = inputFile.read() if contents == None: sys.stderr.write('Failed", "isSame = (os.system(\"cmp -s \" + outputDirName + \"config-tmp \" + outputDirName +", "case # generate C code to an output file g = generator.createGenerator(targetOS) if", "print (\"Would exit from program with code \" + str(code)) raise Exception def", "-o, --output <file> Output to file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p,", "outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs", "outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's config file as the basis", "for target OS {0}'.format(targetOS)) exitProgram(1) 
if outputFileName == '-': g.generate(sys.stdout) else: outputDirName =", "inputFile: contents = inputFile.read() if contents == None: sys.stderr.write('Failed to read file {0}'.format(inputFileName))", "code to an output file g = generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed", "path is inserted else: makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..' * numDirs)", "and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY", "in (\"-o\", \"--output\"): outputFileName = a elif o in (\"-p\", \"--path\"): pathToOS =", "given. ({0} remaining not parsed)\\n\".format(args)) isError = True if showHelp or isError: help(isError)", "print (\"Cannot run SEAL parser:\") if not plyModuleOK: print (\"\\tPLY module not found\")", "import generator # in case this is used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) #", "\"--path\"): pathToOS = a elif o in (\"-c\", \"--continue\"): testMode = True if", "used multiple times generator.components.clearGlobals() parseCommandLine(sys.argv) # for extension modules sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName))) # read", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY", "lines = f.readlines() f.close() if len(lines) > 0: release = lines[0].strip() if len(lines)", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF #", "to read file {0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL code) parser =", "outputFile: g.generateConfigFile(outputFile) # replace the config file only if different: saves rebuiding time.", "+ \"config-tmp\") except IOError as e: try: os.remove(outputDirName + \"config-tmp\") except OSError as", "try: isSame = (os.system(\"cmp -s \" + outputDirName + \"config-tmp \" + outputDirName", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "required modules. 
Run:\\n\\tsudo apt-get install\" else: installStr = \"Make sure you have installed", "\"Unknown\" date = \"Unknown\" try: f = open(versionFile, \"r\") lines = f.readlines() f.close()", "file {0}'.format(inputFileName)) exitProgram(1) # parse input file (SEAL code) parser = generator.SealParser(architecture, printLine,", "os.path.normpath(dirname + os.sep + ('/..' * numDirs) + os.sep + pathToOS) with open(outputFileName,", "Target architecture (defalt: {})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\"", "as the basis try: shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\", outputDirName +", "config file only if different: saves rebuiding time. try: isSame = (os.system(\"cmp -s", "the following disclaimer in the # documentation and/or other materials provided with the", "+ \"..\" + os.sep + \"config\", outputDirName + \"config-tmp\") except IOError as e:", "file (SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if parser.isError: exitProgram(1) #", "must reproduce the above copyright # notice, this list of conditions and the", "os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date = \"Unknown\" try: f = open(versionFile, \"r\")", "showHelp or isError: help(isError) def main(): if not importsOk(): exit(1) # import pathname", "makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return 0 if __name__ == '__main__': exit(main())", "materials provided with the distribution. 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "o in (\"-h\", \"--help\"): showHelp = True elif o in (\"-o\", \"--output\"): outputFileName", "on errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv):", "as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS)", "copyright notice, # this list of conditions and the following disclaimer. # *", "creates problems in makefile where this path is inserted else: makefilePathToOS = os.path.normpath(dirname", "modules:\" print (\"Cannot run SEAL parser:\") if not plyModuleOK: print (\"\\tPLY module not", "outputFileName, makefilePathToOS) # use SEAL application's config file as the basis try: shutil.copyfile(outputDirName", "OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF", "\"doc/VERSION\") release = \"Unknown\" date = \"Unknown\" try: f = open(versionFile, \"r\") lines", "\"Make sure you have installed required modules. 
Run:\\n\\tsudo apt-get install\" else: installStr =", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "contents = inputFile.read() if contents == None: sys.stderr.write('Failed to read file {0}'.format(inputFileName)) exitProgram(1)", "OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "config file as the basis try: shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\",", "TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "= lines[1].strip() except: pass print (\"MansOS version: \" + release + \" (Release", "True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target architecture", "sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError)) def parseCommandLine(argv): global inputFileName global outputFileName", "the following disclaimer. # * Redistributions in binary form must reproduce the above", "(\"Would exit from program with code \" + str(code)) raise Exception def importsOk():", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE", "SEAL parser:\") if not plyModuleOK: print (\"\\tPLY module not found\") installStr += \"", "\\ is special character, creates problems in makefile where this path is inserted", "\"config\", outputDirName + \"config-tmp\") except IOError as e: try: os.remove(outputDirName + \"config-tmp\") except", "installed modules:\" print (\"Cannot run SEAL parser:\") if not plyModuleOK: print (\"\\tPLY module", "True elif o in (\"-o\", \"--output\"): outputFileName = a elif o in (\"-p\",", "#architecture = 'msp430' #architecture = 'pc' targetOS = 'mansos' pathToOS = '../..' 
verboseMode", "for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path to the target OS", "output file g = generator.createGenerator(targetOS) if g is None: sys.stderr.write('Failed to find code", "= a elif o in (\"-c\", \"--continue\"): testMode = True if len(args): inputFileName", "list of conditions and the following disclaimer in the # documentation and/or other", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED", "\" + outputDirName + \"config-tmp \" + outputDirName + \"config\") == 0) except:", "have installed required modules. Run:\\n\\tsudo apt-get install\" else: installStr = \"Make sure you", "exitProgram(1) # parse input file (SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents)", "return True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a <arch>, --arch Target", "Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version and exit\\n\") sys.stderr.write(\" -c, --continue Continue", "following conditions are met: # * Redistributions of source code must retain the", "help(True) isError = False showHelp = False for o, a in opts: if", "the basis try: shutil.copyfile(outputDirName + \"..\" + os.sep + \"config\", outputDirName + \"config-tmp\")", "+ str(code)) raise Exception def importsOk(): plyModuleOK = True # Python Lex Yacc", "--target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file> Output to file, '-'", "met: # * Redistributions of source code must retain the above copyright notice,", "seal package is located selfDirname = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools',", "os.sep + pathToOS) with open(outputFileName, 'w') as outputFile: g.generate(outputFile) with open(outputDirName + \"Makefile\",", 
"SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "outputFileName global architecture global verboseMode global testMode global pathToOS try: opts, args =", "# parse input file (SEAL code) parser = generator.SealParser(architecture, printLine, verboseMode) parser.run(contents) if", "if len(outputDirName): outputDirName += os.sep if not os.path.exists(outputDirName): os.makedirs(outputDirName) numDirs = len(os.path.normpath(outputFileName).split(os.sep)) -", "return -1 if g.isComponentUsed(\"network\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS)", "'fwd'), makefilePathToOS) g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName,", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "# Redistribution and use in source and binary forms, with or without #", "INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "--output <file> Output to file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path", "= False def exitProgram(code): if not testMode: exit(code) print (\"Would exit from program", "a.lower() if o in (\"-t\", \"--target\"): targetOS = a.lower() elif o in (\"-v\",", "not work when called from IDE on Windows) # # Copyright (c) 2012", "on Windows) # # Copyright (c) 2012 <NAME> # # Redistribution and use", "'testarch' #architecture = 'msp430' #architecture = 'pc' targetOS = 'mansos' pathToOS = '../..'", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR", "os.path.dirname(os.path.realpath(outputFileName)) if 
os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special character, creates problems", "exit from program with code \" + str(code)) raise Exception def importsOk(): plyModuleOK", "if len(args): inputFileName = args[0] args = args[1:] if len(args): sys.stderr.write(\"Too many arguments", "with open(outputDirName + \"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace the config file", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE", "the config file only if different: saves rebuiding time. try: isSame = (os.system(\"cmp", "ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os, sys, getopt,", "# Python Lex Yacc - for compilation try: import ply except ImportError: plyModuleOK", "a.lower() elif o in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\"", "(\"-o\", \"--output\"): outputFileName = a elif o in (\"-p\", \"--path\"): pathToOS = a", "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION)", "plyModuleOK = False if not plyModuleOK: if os.name == 'posix': installStr = \"Make", "is None: sys.stderr.write('Failed to find code generator for target OS {0}'.format(targetOS)) exitProgram(1) if", "isError: help(isError) def main(): if not importsOk(): exit(1) # import pathname where seal", "print something like \"option -a not recognized\" help(True) isError = False showHelp =", "remaining not parsed)\\n\".format(args)) isError = True if showHelp or isError: help(isError) def main():", "exitProgram(1) if outputFileName == '-': g.generate(sys.stdout) else: outputDirName = os.path.dirname(outputFileName) if len(outputDirName): outputDirName", "binary forms, with or without # modification, are permitted provided that the following", "Windows) # # 
Copyright (c) 2012 <NAME> # # Redistribution and use in", "EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os, sys,", "OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY,", "\"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date = \"Unknown\" try: f", "# read file to-be-parsed with open(inputFileName, 'r') as inputFile: contents = inputFile.read() if", "plyModuleOK = True # Python Lex Yacc - for compilation try: import ply", "if not plyModuleOK: if os.name == 'posix': installStr = \"Make sure you have", "installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version", "o in (\"-t\", \"--target\"): targetOS = a.lower() elif o in (\"-v\", \"--version\"): versionFile", "if len(lines) > 1: date = lines[1].strip() except: pass print (\"MansOS version: \"", "\"--continue\"): testMode = True if len(args): inputFileName = args[0] args = args[1:] if", "\"config-tmp\") except IOError as e: try: os.remove(outputDirName + \"config-tmp\") except OSError as e:", "Output to file, '-' for stdout (default: {0})\\n\".format(outputFileName)) sys.stderr.write(\" -p, --path <path> Path", "OS installation (default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print", "\"option -a not recognized\" help(True) isError = False showHelp = False for o,", "and exit: print (str(err)) # will print something like \"option -a not recognized\"", "generate output file in this case # generate C code to an output", "--version Print version and exit\\n\") sys.stderr.write(\" -c, --continue Continue on errors (test mode)\\n\")", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING", "# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF", "targetOS = a.lower() elif o in (\"-v\", 
\"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release", "something like \"option -a not recognized\" help(True) isError = False showHelp = False", "o in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\") release = \"Unknown\" date =", "if g is None: sys.stderr.write('Failed to find code generator for target OS {0}'.format(targetOS))", "this case # generate C code to an output file g = generator.createGenerator(targetOS)", "global testMode global pathToOS try: opts, args = getopt.getopt(sys.argv[1:], \"a:cho:p:t:Vv\", [\"arch=\", \"continue\", \"help\",", "the # documentation and/or other materials provided with the distribution. # THIS SOFTWARE", "(default: {0})\\n\".format(pathToOS)) sys.stderr.write(\" -V, --verbose Verbose mode\\n\") sys.stderr.write(\" -v, --version Print version and", "arguments given. ({0} remaining not parsed)\\n\".format(args)) isError = True if showHelp or isError:", "sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal import generator # in case this", "FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE", "PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "o in (\"-a\", \"--arch\"): architecture = a.lower() if o in (\"-t\", \"--target\"): targetOS", "file only if different: saves rebuiding time. try: isSame = (os.system(\"cmp -s \"", "THE POSSIBILITY OF SUCH DAMAGE. import os, sys, getopt, shutil inputFileName = 'test.sl'", "--continue Continue on errors (test mode)\\n\") sys.stderr.write(\" -h, --help Print this help\\n\") sys.exit(int(isError))", "documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED", "with the distribution. 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "or isError: help(isError) def main(): if not importsOk(): exit(1) # import pathname where", "dirname = os.path.dirname(os.path.realpath(outputFileName)) if os.path.isabs(pathToOS): makefilePathToOS = pathToOS.strip('\\\\'); # \\ is special character,", "with open(outputDirName + \"Makefile\", 'w') as outputFile: g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL", "\"config-tmp\", 'a+') as outputFile: g.generateConfigFile(outputFile) # replace the config file only if different:", "'seal', 'components')) from seal import generator # in case this is used multiple", "shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except Exception as ex: print (ex) if", "isSame: try: shutil.move(outputDirName + \"config-tmp\", outputDirName + \"config\") except Exception as ex: print", "(\"-c\", \"--continue\"): testMode = True if len(args): inputFileName = args[0] args = args[1:]", "OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS", "from IDE on Windows) # # Copyright (c) 2012 <NAME> # # Redistribution", "{})\\n\".format(architecture)) sys.stderr.write(\" -t <target>, --target Target OS (default: {0})\\n\".format(targetOS)) sys.stderr.write(\" -o, --output <file>", "\"--target\"): targetOS = a.lower() elif o in (\"-v\", \"--version\"): versionFile = os.path.join(\"../..\", \"doc/VERSION\")", "INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "if o in (\"-a\", \"--arch\"): architecture = a.lower() if o in (\"-t\", \"--target\"):", "permitted provided that the following conditions are met: # * Redistributions of source", "g.generateMakefile(outputFile, outputFileName, makefilePathToOS) # use SEAL application's config file as the basis try:", "not plyModuleOK: if os.name == 'posix': installStr = \"Make sure you have installed", "getopt.GetoptError as err: # print help information and 
exit: print (str(err)) # will", "find code generator for target OS {0}'.format(targetOS)) exitProgram(1) if outputFileName == '-': g.generate(sys.stdout)", "character, creates problems in makefile where this path is inserted else: makefilePathToOS =", "'coll'), makefilePathToOS) elif g.isComponentUsed(\"radio\"): g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS) if g.isComponentUsed(\"sdcard\"): g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py')) return", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "(because /usr/bin/env python does not work when called from IDE on Windows) #", "disclaimer. # * Redistributions in binary form must reproduce the above copyright #", "pathToOS, 'tools')) sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components')) from seal import generator # in", "source code must retain the above copyright notice, # this list of conditions", "that the following conditions are met: # * Redistributions of source code must", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF", "copyright # notice, this list of conditions and the following disclaimer in the", "(installStr) return False return True def printLine(line): sys.stderr.write(line) def help(isError): sys.stderr.write(\"Usage:\\n\") sys.stderr.write(\" -a", "True if len(args): inputFileName = args[0] args = args[1:] if len(args): sys.stderr.write(\"Too many", "above copyright notice, # this list of conditions and the following disclaimer. #", "* Redistributions of source code must retain the above copyright notice, # this" ]
[ ".. import configurator_enums import template import optimize import launch from os import path", "__init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__),", "\"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\",", "def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is", "\"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\",", "configurator_enums import template import optimize import launch from os import path import yaml", "prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb:", "FIXME: Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config:", "wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually because zalando postgres operator", "path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object],", "assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\",", "Loader=yaml.SafeLoader) postgresql_spec = 
minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: # convert to string", "== \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception:", "to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids:", "if identifier == \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param))", "the postgresql crd spec only accepts string type postgresql_spec[\"parameters\"] = {k: str(v) for", "TODO: This is only needed for the next line, clean up later? with", "for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()})", "kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if", "path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\",", "not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did", "a CustomResourceDefinition that is not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def", "zalando postgres operator uses a CustomResourceDefinition that is not easily parseable to get", "Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not necessary to do anything once", "manifest_config.seek(0) 
manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"),", "import launch from os import path import yaml import warnings import time import", "in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\",", "kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually because zalando postgres", "import configurator_enums import template import optimize import launch from os import path import", "config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: #", "Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not", "This is only needed for the next line, clean up later? with open(path.join(kubeconfig_dir.name,", "object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not necessary", "get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str,", "[\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\",", "initialize. 
Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client match.\") def execute(self, config:", "optimize import launch from os import path import yaml import warnings import time", "= k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter:", "needed for the next line, clean up later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as", "def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k,", "# FIXME: Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self,", "import path import yaml import warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label", "line, clean up later? 
with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config,", "with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"]", "yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: # convert to", "str(v) for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in", "v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name,", "kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v in config[\"param_config\"].items(): try: identifier,", "# Need to wait manually because zalando postgres operator uses a CustomResourceDefinition that", "import warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type =", "minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: #", "\"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\",", "that is not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config:", "launch from os import path 
import yaml import warnings import time import re", "later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec =", "kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0],", "{1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config)", "open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if", "for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for", "identifier == \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except", "[\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\",", "in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml))", "label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def", "\"-c\", r\"\\c test\"])): raise Exception(\"Table did not properly initialize. 
Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable", "easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context:", "config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"])", "config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter:", "= yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: # convert", "not in postgresql_spec: # convert to string since the postgresql crd spec only", "kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need", "CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) #", "Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO:", "is not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", 
config[\"postgres_replicas\"]) def prepare(self, config: Dict[str,", "r\"\\c test\"])): raise Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to", "uses a CustomResourceDefinition that is not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"])", "= {k: str(v) for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k,", "convert to string since the postgresql crd spec only accepts string type postgresql_spec[\"parameters\"]", "k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD", "\"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait", "pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not necessary to do anything", "pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client match.\") def execute(self, config: Dict[str, object],", "config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for the next line, clean", "v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue", "str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary", "= template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) 
self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for the", "\"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0],", "accepts string type postgresql_spec[\"parameters\"] = {k: str(v) for k, v in config[\"postgres_config\"].items()} else:", "= minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: # convert to string since the", "\"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to", "k, v in config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if identifier == \"postgres\":", "class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application", "continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed", "launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v in config[\"param_config\"].items(): try: identifier, param", "Need to wait manually because zalando postgres operator uses a CustomResourceDefinition that is", "\"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for the next line,", "Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client match.\") def execute(self, config: Dict[str,", "execute(self, config: Dict[str, object], 
kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb:", "object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v in config[\"param_config\"].items(): try:", "once tables are configured for ycsb return warnings.warn(\"Unable to execute, no client match.\")", "only accepts string type postgresql_spec[\"parameters\"] = {k: str(v) for k, v in config[\"postgres_config\"].items()}", "= launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client)", "client: 'DBClient'): super().__init__(client) # FIXME: Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\",", "kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0],", "parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"),", "else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir", "self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for the next line, clean up", "'DBClient'): super().__init__(client) # FIXME: Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\"))", "kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not 
necessary to", "for the next line, clean up later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config:", "config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client", "postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not", "is configurator_enums.DBClient.ycsb: # Not necessary to do anything once tables are configured for", "yaml import warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type", "identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'):", "string type postgresql_spec[\"parameters\"] = {k: str(v) for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k:", "Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"),", "minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: # convert to string since the postgresql", "as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in", "= { \"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile", "} def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile path for refactoring self.config_root", "wait manually because zalando postgres operator uses a 
CustomResourceDefinition that is not easily", "config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) #", "StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]):", "Dict from . import _target_configurator_base from .. import configurator_enums import template import optimize", "List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not necessary to do anything once tables", "configurator_enums.DBClient.ycsb: # Not necessary to do anything once tables are configured for ycsb", "to wait manually because zalando postgres operator uses a CustomResourceDefinition that is not", "template import optimize import launch from os import path import yaml import warnings", "Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v in config[\"param_config\"].items():", "import List, Dict from . import _target_configurator_base from .. import configurator_enums import template", "{k: str(v) for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v", "properly initialize. 
Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client match.\") def execute(self,", "Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False)", "crd spec only accepts string type postgresql_spec[\"parameters\"] = {k: str(v) for k, v", "manually because zalando postgres operator uses a CustomResourceDefinition that is not easily parseable", "client match.\") def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if", "import optimize import launch from os import path import yaml import warnings import", "path import yaml import warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label =", "kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did not properly initialize.", "necessary to do anything once tables are configured for ycsb return warnings.warn(\"Unable to", "else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting", "except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) #", "pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\",", 
"SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application }", "config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v in", "from typing import List, Dict from . import _target_configurator_base from .. import configurator_enums", "to do anything once tables are configured for ycsb return warnings.warn(\"Unable to execute,", "param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir)", "k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0)", "kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually because zalando postgres operator uses", "manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec:", "did not properly initialize. 
Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client match.\")", "launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client) #", "path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name", "kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not", "for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False)", "no client match.\") def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]):", "since the postgresql crd spec only accepts string type postgresql_spec[\"parameters\"] = {k: str(v)", "config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\",", "kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for", "to prepare, no client match.\") def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids:", "= \"spilo\" 
identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def __init__(self,", "kube_context.namespace_name }) for k, v in config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if", "not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client match.\") def", "v in config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param]", "launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: # Not necessary to do", "while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"])", "if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table", "{ \"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile path", "not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object],", "namespaced=False) # Need to wait manually because zalando postgres operator uses a CustomResourceDefinition", "}) for k, v in config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if identifier", "self.client is configurator_enums.DBClient.ycsb: # Not necessary to do anything once tables are configured", "configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", 
kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\",", "match.\") def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client", "identifier, param = k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized", "for k, v in config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if identifier ==", "List, Dict from . import _target_configurator_base from .. import configurator_enums import template import", "from .. import configurator_enums import template import optimize import launch from os import", "import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict = {", "postgresql_spec: # convert to string since the postgresql crd spec only accepts string", "warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This", "object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0)", "\"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise", "launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root,", "typing import List, Dict from . import _target_configurator_base from .. import configurator_enums import", "next line, clean up later? 
with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml =", "# TODO: This is only needed for the next line, clean up later?", "deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v", "os import path import yaml import warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator):", "\"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])):", "parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is", "up later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec", "re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did not", "in config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] =", "# Not necessary to do anything once tables are configured for ycsb return", "if \"parameters\" not in postgresql_spec: # convert to string since the postgresql crd", "Not necessary to do anything once tables are configured for ycsb return warnings.warn(\"Unable", "prepare, no client match.\") def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str,", "manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), 
wait_for_ready=False) time.sleep(1)", "\"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return", "Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no client", "clean up later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader)", "do anything once tables are configured for ycsb return warnings.warn(\"Unable to execute, no", "{0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root,", "List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\")", "because zalando postgres operator uses a CustomResourceDefinition that is not easily parseable to", "launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile path for refactoring", "self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\",", "self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({", "re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) 
kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if", "\"parameters\" not in postgresql_spec: # convert to string since the postgresql crd spec", "re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\":", "operator uses a CustomResourceDefinition that is not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\",", "anything once tables are configured for ycsb return warnings.warn(\"Unable to execute, no client", "\"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually because zalando postgres operator uses a", "time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict =", "if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while", "to string since the postgresql crd spec only accepts string type postgresql_spec[\"parameters\"] =", "in postgresql_spec: # convert to string since the postgresql crd spec only accepts", "config[\"param_config\"].items(): try: identifier, param = k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] = v", "import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict", "import _target_configurator_base from .. 
import configurator_enums import template import optimize import launch from", "spec only accepts string type postgresql_spec[\"parameters\"] = {k: str(v) for k, v in", "type postgresql_spec[\"parameters\"] = {k: str(v) for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v)", "test\"])): raise Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare,", "super().__init__(client) # FIXME: Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def", "\"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\"", "\"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name }) for", "def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile path for refactoring self.config_root =", "CustomResourceDefinition that is not easily parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self,", "necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"),", "# Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) 
kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name,", "for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context:", "postgresql crd spec only accepts string type postgresql_spec[\"parameters\"] = {k: str(v) for k,", "manifest_config.truncate(0) manifest_config.write(yaml.dump(minimal_manifest_yaml)) # Waiting not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False)", "postgres operator uses a CustomResourceDefinition that is not easily parseable to get StatefulSets", "\"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\",", "template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for the next", ". import _target_configurator_base from .. import configurator_enums import template import optimize import launch", "wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually because", "Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) >", "raise Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]]))) return warnings.warn(\"Unable to prepare, no", "the next line, clean up later? 
with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\") as manifest_config: minimal_manifest_yaml", "is configurator_enums.DBClient.ycsb: assert(len(pod_ids[\"spilo\"]) > 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0],", "= v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k))", "\"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\",", "= path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\":", "v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for k, v in config[\"postgres_config\"].items()}) manifest_config.seek(0) manifest_config.truncate(0)", "def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]): if self.client is", "postgresql_spec[\"parameters\"] = {k: str(v) for k, v in config[\"postgres_config\"].items()} else: postgresql_spec[\"parameters\"].update({k: str(v) for", "is only needed for the next line, clean up later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"),", "# convert to string since the postgresql crd spec only accepts string type", "from . import _target_configurator_base from .. 
import configurator_enums import template import optimize import", "warnings.warn(\"Unable to prepare, no client match.\") def execute(self, config: Dict[str, object], kube_context: launch.KubeContext,", "from os import path import yaml import warnings import time import re class", "warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\" identifier_type = launch.IdentifierType.application", "pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME:", "config.update({ \"namespace_name\": kube_context.namespace_name }) for k, v in config[\"param_config\"].items(): try: identifier, param =", "\"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did not properly initialize. Logs:\\n{}\".format(kube_context.kubectl_subprocess([\"logs\", pod_ids[\"spilo\"][0]])))", "Fragile path for refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str,", "kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\",", "try: identifier, param = k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] = v else:", "param = k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0}", "warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized parameter: {}\".format(k)) continue kubeconfig_dir =", "string since the postgresql crd spec only accepts string type postgresql_spec[\"parameters\"] = {k:", "_target_configurator_base from .. 
import configurator_enums import template import optimize import launch from os", "\"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c", "\"spilo\": launch.IdentifierType.application } def __init__(self, client: 'DBClient'): super().__init__(client) # FIXME: Fragile path for", "if self.client is configurator_enums.DBClient.ycsb: # Not necessary to do anything once tables are", "k.split(\":\") if identifier == \"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier,", "\"namespace_name\": kube_context.namespace_name }) for k, v in config[\"param_config\"].items(): try: identifier, param = k.split(\":\")", "parseable to get StatefulSets kube_context._sts_wait(\"acid-minimal-cluster\", config[\"postgres_replicas\"]) def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext,", "[\"psql\", \"-U\", \"postgres\"])): time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now", "postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not in postgresql_spec: # convert to string since", "\"r+\") as manifest_config: minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader) postgresql_spec = minimal_manifest_yaml[\"spec\"][\"postgresql\"] if \"parameters\" not", "connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-c\", r\"\\c test\"])): raise Exception(\"Table did not properly", "\"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually", "time.sleep(1) 
kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"cluster-level-rbac-patch.yaml\"), namespaced=False) # Need to wait manually because zalando", "only needed for the next line, clean up later? with open(path.join(kubeconfig_dir.name, \"minimal-manifest.yaml\"), \"r+\")", "import template import optimize import launch from os import path import yaml import", "\"spilo\" identifier_type = launch.IdentifierType.application pod_fetch_dict = { \"spilo\": launch.IdentifierType.application } def __init__(self, client:", "\"postgres\": config[\"postgres_config\"][param] = v else: warnings.warn(\"Unrecognized {0} parameter: {1}\".format(identifier, param)) except Exception: warnings.warn(\"Unrecognized", "> 0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])):", "time.sleep(1) kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\", \"-f\", \"/init_table.sql\"]) if not re.search(\"now connected\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\",", "import yaml import warnings import time import re class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator): label = \"spilo\"", "0) kube_context.copy_to_pod(pod_ids[\"spilo\"][0], path.join(self.config_root, \"client_ycsb\", \"init_table.sql\"), \"/init_table.sql\") while re.search(\"error\", kube_context.run_command(pod_ids[\"spilo\"][0], [\"psql\", \"-U\", \"postgres\"])): time.sleep(1)", "return warnings.warn(\"Unable to prepare, no client match.\") def execute(self, config: Dict[str, object], kube_context:", "{}\".format(k)) continue kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, \"kubernetes\"), config) self.open_temp_dirs.append(kubeconfig_dir) # 
TODO: This is only", "\"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext): config.update({ \"namespace_name\": kube_context.namespace_name })", "refactoring self.config_root = path.abspath(path.join(path.dirname(__file__), \"../../../config\", \"target_spilo_postgres\")) def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext):", "not necessary for CRD kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, \"zalando\", \"manifests\", \"postgresql.crd.yaml\"), wait_for_ready=False) time.sleep(1) kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False) kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name," ]
[ "the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test the `Get_size`", "\"\"\" def setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self):", "== 0 def test_Get_size(self): \"\"\"Test the `Get_size` function \"\"\" assert self.mpidummy.Get_size() == 1", "import MPIDummy class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the", "self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() ==", "from PTMCMCSampler.nompi4py import MPIDummy class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def setup(self):", "= MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0", "`Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test the `Get_size` function", "\"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank()", "test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test", "\"\"\"Setup the MPIDummy object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank`", "method \"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test the `Get_size` function \"\"\"", "assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test the `Get_size` function \"\"\" assert self.mpidummy.Get_size()", "\"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test the `Get_size` function \"\"\" assert", "import pytest from PTMCMCSampler.nompi4py import MPIDummy class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\"", "the MPIDummy object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method", "MPIDummy 
class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the MPIDummy", "class \"\"\" def setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy = MPIDummy() def", "TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the MPIDummy object \"\"\"", "object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert", "MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy = MPIDummy()", "def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self):", "PTMCMCSampler.nompi4py import MPIDummy class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup", "\"\"\"Test the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy", "pytest from PTMCMCSampler.nompi4py import MPIDummy class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def", "the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy =", "setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the", "MPIDummy object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\"", "self.mpidummy.Get_rank() == 0 def test_Get_size(self): \"\"\"Test the `Get_size` function \"\"\" assert self.mpidummy.Get_size() ==", "def setup(self): \"\"\"Setup the MPIDummy object \"\"\" self.mpidummy = MPIDummy() def test_Get_rank(self): \"\"\"Test", "MPIDummy() def test_Get_rank(self): \"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0 def", "class TestMPIDummp(object): \"\"\"Test the MPIDummpy class \"\"\" def setup(self): \"\"\"Setup the MPIDummy object", "\"\"\"Test the `Get_rank` method \"\"\" assert self.mpidummy.Get_rank() == 0 def test_Get_size(self): 
\"\"\"Test the" ]
[ "hypixel import json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with", "writes your stats to a JSON file. \"\"\" import hypixel import json client", "client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w') as", "\"\"\" This program writes your stats to a JSON file. \"\"\" import hypixel", "stats = client.run(player.get_stats()) with open(jsonfile, 'w') as f: # replace jsonfile with the", "JSON file. \"\"\" import hypixel import json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\")", "replace jsonfile with the directory of the file you want to write to!", "jsonfile with the directory of the file you want to write to! json.dump(f,", "import json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile,", "'w') as f: # replace jsonfile with the directory of the file you", "player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w') as f: # replace", "# replace jsonfile with the directory of the file you want to write", "= hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w') as f:", "with open(jsonfile, 'w') as f: # replace jsonfile with the directory of the", "stats to a JSON file. 
\"\"\" import hypixel import json client = hypixel.Client(key)", "= client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w') as f: # replace jsonfile", "json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w')", "client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w') as f: # replace jsonfile with", "open(jsonfile, 'w') as f: # replace jsonfile with the directory of the file", "client.run(player.get_stats()) with open(jsonfile, 'w') as f: # replace jsonfile with the directory of", "as f: # replace jsonfile with the directory of the file you want", "= client.run(player.get_stats()) with open(jsonfile, 'w') as f: # replace jsonfile with the directory", "a JSON file. \"\"\" import hypixel import json client = hypixel.Client(key) player =", "hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats()) with open(jsonfile, 'w') as f: #", "with the directory of the file you want to write to! json.dump(f, stats)", "\"\"\" import hypixel import json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats =", "program writes your stats to a JSON file. \"\"\" import hypixel import json", "your stats to a JSON file. \"\"\" import hypixel import json client =", "f: # replace jsonfile with the directory of the file you want to", "This program writes your stats to a JSON file. \"\"\" import hypixel import", "<gh_stars>0 \"\"\" This program writes your stats to a JSON file. \"\"\" import", "to a JSON file. \"\"\" import hypixel import json client = hypixel.Client(key) player", "file. 
\"\"\" import hypixel import json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats", "import hypixel import json client = hypixel.Client(key) player = client.get_player(\"SomeHypixelNon\") stats = client.run(player.get_stats())" ]
[ "self.bot.get_user(int(num)) # get the user if they gave an ID else: clear_member =", "self.bot.get_user(int(user_men)) # get the user if they mentioned elif (num.isdigit()) and (len(num) ==", "gave is not a member') else: async for message in ctx.channel.history(limit=None): limit +=", "a number is given and its not an ID int_num = int(num) if", "main from discord.ext import commands from cogs.help import Help class Clear(commands.Cog): def __init__(self,", "except IndexError: user_men = '' if ctx.guild is None: await main.error_embed(ctx, 'You cannot", "else: int_num2 = int(num2) if int_num2 > 0: limit = 0 if user_men", "'' if ctx.guild is None: await main.error_embed(ctx, 'You cannot use this command in", "ctx.channel.history(limit=None): limit += 1 if message.author == clear_member: int_num2 -= 1 if int_num2", "its not an ID int_num = int(num) if int_num > 0: await ctx.channel.purge(limit=int_num)", "ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}') else: await main.error_embed(ctx, 'You need to", "it was already deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages", "and its not an ID int_num = int(num) if int_num > 0: await", "self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from", "else: await main.error_embed(ctx, 'You need to give a positive non zero number') else:", "if message.author == clear_member: int_num2 -= 1 if int_num2 == 0: break def", "was already deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted',", "the name you gave is not a member') else: async for message in", "int_num > 0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages", "is given and its not an ID int_num = int(num) if int_num >", "in 
{ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}') else: await main.error_embed(ctx,", "int(num2) if int_num2 > 0: limit = 0 if user_men != '': clear_member", "error if it was already deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx,", "0: limit = 0 if user_men != '': clear_member = self.bot.get_user(int(user_men)) # get", "channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages", "== clear_member: int_num2 -= 1 if int_num2 == 0: break def member_check(m): return", "ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete the command except discord.NotFound: # ignore", "member') else: async for message in ctx.channel.history(limit=None): limit += 1 if message.author ==", "num2=None): if num is None: return await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0])", "await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if", "messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}') else: await", "@commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None): if", "bot): \"\"\"Returns embeds for the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl',", "ctx, num=None, num2=None): if num is None: return await Help.clear(self, ctx) try: user_men", "clear_member = self.bot.get_user(int(user_men)) # get the user if they mentioned elif (num.isdigit()) and", "None: await main.error_embed(ctx, 'The user you gave is either invalid or the name", "0: break def member_check(m): return m.author == clear_member await 
ctx.channel.purge(limit=limit, check=member_check) try: await", "await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete the command except discord.NotFound: #", "# make sure a number is given and its not an ID int_num", "(user_men == '') and (len(num) != 18) and (num.isdigit()): # make sure a", "import commands from cogs.help import Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds", "'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None): if num is None: return", "is None: return await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men", "commands from cogs.help import Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for", "{clear_member.id}') else: await main.error_embed(ctx, 'You need to give a positive non zero number')", "command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self,", "\"\"\"Returns embeds for the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg',", "num is None: return await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError:", "if clear_member is None: await main.error_embed(ctx, 'The user you gave is either invalid", "get the user if they mentioned elif (num.isdigit()) and (len(num) == 18): clear_member", "None: return await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men =", "use this command in DMs') else: if (user_men == '') and (len(num) !=", "'You cannot use this command in DMs') else: if (user_men == '') and", "get the member if they gave a name with/without discrimitor if clear_member is", "int_num2 -= 1 if int_num2 == 0: break def member_check(m): return m.author ==", 
"messages in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You need to give a", "if user_men != '': clear_member = self.bot.get_user(int(user_men)) # get the user if they", "to give a positive non zero number') else: int_num2 = int(num2) if int_num2", "{ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}') else: await main.error_embed(ctx, 'You", "main.error_embed(ctx, 'You cannot use this command in DMs') else: if (user_men == '')", "await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared", "give a positive non zero number') else: int_num2 = int(num2) if int_num2 >", "either invalid or the name you gave is not a member') else: async", "deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared", "an ID else: clear_member = ctx.guild.get_member_named(num) # get the member if they gave", "print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You need", "case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None): if num", "member_check(m): return m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete", "await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}',", "clear_member is None: await main.error_embed(ctx, 'The user you gave is either invalid or", "{ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id} from {clear_member.id}')", "channel, clear_member) print(f'{ctx.author.id} 
cleared {num2} messages in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx,", "embeds for the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge'])", "import discord import main from discord.ext import commands from cogs.help import Help class", "{ctx.channel}') else: await main.error_embed(ctx, 'You need to give a positive non zero number')", "they gave an ID else: clear_member = ctx.guild.get_member_named(num) # get the member if", "from {clear_member.id}') else: await main.error_embed(ctx, 'You need to give a positive non zero", "command in DMs') else: if (user_men == '') and (len(num) != 18) and", "elif (num.isdigit()) and (len(num) == 18): clear_member = self.bot.get_user(int(num)) # get the user", "user you gave is either invalid or the name you gave is not", "ctx.message.delete() # delete the command except discord.NotFound: # ignore error if it was", "get the user if they gave an ID else: clear_member = ctx.guild.get_member_named(num) #", "clear_member = ctx.guild.get_member_named(num) # get the member if they gave a name with/without", "await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}',", "ctx.guild.get_member_named(num) # get the member if they gave a name with/without discrimitor if", "print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}') else: await main.error_embed(ctx, 'You need to give", "except discord.NotFound: # ignore error if it was already deleted pass channel =", "str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if ctx.guild is None: await main.error_embed(ctx, 'You", "from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id} from {clear_member.id}') else:", "they gave a name with/without discrimitor if clear_member is None: 
await main.error_embed(ctx, 'The", "f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in", "the user if they mentioned elif (num.isdigit()) and (len(num) == 18): clear_member =", "gave a name with/without discrimitor if clear_member is None: await main.error_embed(ctx, 'The user", "(num.isdigit()) and (len(num) == 18): clear_member = self.bot.get_user(int(num)) # get the user if", "message in ctx.channel.history(limit=None): limit += 1 if message.author == clear_member: int_num2 -= 1", "is None: await main.error_embed(ctx, 'You cannot use this command in DMs') else: if", "None: await main.error_embed(ctx, 'You cannot use this command in DMs') else: if (user_men", "{num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages in", "Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for the clear command.\"\"\" self.bot", "in DMs') else: if (user_men == '') and (len(num) != 18) and (num.isdigit()):", "zero number') else: int_num2 = int(num2) if int_num2 > 0: limit = 0", "ID int_num = int(num) if int_num > 0: await ctx.channel.purge(limit=int_num) channel = await", "= 0 if user_men != '': clear_member = self.bot.get_user(int(user_men)) # get the user", "= int(num2) if int_num2 > 0: limit = 0 if user_men != '':", "message.author == clear_member: int_num2 -= 1 if int_num2 == 0: break def member_check(m):", "await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention}", "int_num2 == 0: break def member_check(m): return m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check)", "DMs') else: if (user_men == '') and (len(num) != 18) and (num.isdigit()): #", "# get the member if they gave a name with/without discrimitor if clear_member", "make 
sure a number is given and its not an ID int_num =", "positive non zero number') else: int_num2 = int(num2) if int_num2 > 0: limit", "'') and (len(num) != 18) and (num.isdigit()): # make sure a number is", "you gave is not a member') else: async for message in ctx.channel.history(limit=None): limit", "clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete the command except discord.NotFound:", "import Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for the clear command.\"\"\"", "bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None):", "cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages", "= bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None,", "def clear(self, ctx, num=None, num2=None): if num is None: return await Help.clear(self, ctx)", "non zero number') else: int_num2 = int(num2) if int_num2 > 0: limit =", "def __init__(self, bot): \"\"\"Returns embeds for the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True,", "gave an ID else: clear_member = ctx.guild.get_member_named(num) # get the member if they", "the member if they gave a name with/without discrimitor if clear_member is None:", "a name with/without discrimitor if clear_member is None: await main.error_embed(ctx, 'The user you", "is either invalid or the name you gave is not a member') else:", "try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if ctx.guild is None:", "(num.isdigit()): # make sure a number is given and its not an ID", "delete the command except discord.NotFound: # ignore error if it was already 
deleted", "discord.NotFound: # ignore error if it was already deleted pass channel = await", "main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel,", "int(num) if int_num > 0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None,", "deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared", "!= 18) and (num.isdigit()): # make sure a number is given and its", "18): clear_member = self.bot.get_user(int(num)) # get the user if they gave an ID", "{int_num} messages in {ctx.channel}') else: await main.error_embed(ctx, 'You need to give a positive", "async def clear(self, ctx, num=None, num2=None): if num is None: return await Help.clear(self,", "0 if user_men != '': clear_member = self.bot.get_user(int(user_men)) # get the user if", "== clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete the command except", "from cogs.help import Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for the", "if they gave a name with/without discrimitor if clear_member is None: await main.error_embed(ctx,", "this command in DMs') else: if (user_men == '') and (len(num) != 18)", "= await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in", "channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}') else: await main.error_embed(ctx, 'You need", "__init__(self, bot): \"\"\"Returns embeds for the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True,", "in {ctx.channel}') else: await main.error_embed(ctx, 'You need to give a positive non zero", "else: if (user_men == 
'') and (len(num) != 18) and (num.isdigit()): # make", "break def member_check(m): return m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete()", "ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num}", "return m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete the", "> 0: limit = 0 if user_men != '': clear_member = self.bot.get_user(int(user_men)) #", "if ctx.guild is None: await main.error_embed(ctx, 'You cannot use this command in DMs')", "you gave is either invalid or the name you gave is not a", "and (len(num) != 18) and (num.isdigit()): # make sure a number is given", "they mentioned elif (num.isdigit()) and (len(num) == 18): clear_member = self.bot.get_user(int(num)) # get", "if int_num2 > 0: limit = 0 if user_men != '': clear_member =", "return await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men = ''", "# get the user if they gave an ID else: clear_member = ctx.guild.get_member_named(num)", "user_men = '' if ctx.guild is None: await main.error_embed(ctx, 'You cannot use this", "cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}')", "await main.error_embed(ctx, 'You need to give a positive non zero number') else: int_num2", "clear_member = self.bot.get_user(int(num)) # get the user if they gave an ID else:", "(len(num) != 18) and (num.isdigit()): # make sure a number is given and", "= self.bot.get_user(int(user_men)) # get the user if they mentioned elif (num.isdigit()) and (len(num)", "if they gave an ID else: clear_member = ctx.guild.get_member_named(num) # get the member", "check=member_check) try: await ctx.message.delete() # delete the command except 
discord.NotFound: # ignore error", "(len(num) == 18): clear_member = self.bot.get_user(int(num)) # get the user if they gave", "f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2}", "clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def", "not a member') else: async for message in ctx.channel.history(limit=None): limit += 1 if", "# ignore error if it was already deleted pass channel = await self.bot.fetch_channel(main.ids(3))", "discord import main from discord.ext import commands from cogs.help import Help class Clear(commands.Cog):", "else: clear_member = ctx.guild.get_member_named(num) # get the member if they gave a name", "if it was already deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk", "main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id}", "== 18): clear_member = self.bot.get_user(int(num)) # get the user if they gave an", "messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id}", "{ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You need to give a positive non", "need to give a positive non zero number') else: int_num2 = int(num2) if", "0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention}", "and (len(num) == 18): clear_member = self.bot.get_user(int(num)) # get the user if they", "{int_num} messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num} messages in 
{ctx.channel}') else:", "already deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention}", "if num is None: return await Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except", "name with/without discrimitor if clear_member is None: await main.error_embed(ctx, 'The user you gave", "aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None): if num is", "clear(self, ctx, num=None, num2=None): if num is None: return await Help.clear(self, ctx) try:", "member if they gave a name with/without discrimitor if clear_member is None: await", "discrimitor if clear_member is None: await main.error_embed(ctx, 'The user you gave is either", "or the name you gave is not a member') else: async for message", "clear_member: int_num2 -= 1 if int_num2 == 0: break def member_check(m): return m.author", "int_num2 = int(num2) if int_num2 > 0: limit = 0 if user_men !=", "ignore error if it was already deleted pass channel = await self.bot.fetch_channel(main.ids(3)) await", "in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You need to give a positive", "== 0: break def member_check(m): return m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check) try:", "command except discord.NotFound: # ignore error if it was already deleted pass channel", "await main.error_embed(ctx, 'You cannot use this command in DMs') else: if (user_men ==", "cleared {int_num} messages in {ctx.channel}') else: await main.error_embed(ctx, 'You need to give a", "messages in {ctx.channel}') else: await main.error_embed(ctx, 'You need to give a positive non", "from discord.ext import commands from cogs.help import Help class Clear(commands.Cog): def __init__(self, bot):", "= await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} 
messages in", "an ID int_num = int(num) if int_num > 0: await ctx.channel.purge(limit=int_num) channel =", "messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id}", "cogs.help import Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for the clear", "ctx.guild is None: await main.error_embed(ctx, 'You cannot use this command in DMs') else:", "sure a number is given and its not an ID int_num = int(num)", "'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member)", "await main.error_embed(ctx, 'You need to give a positive non zero number') def setup(bot):", "the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async", "pass channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2}", "m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() # delete the command", "= str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if ctx.guild is None: await main.error_embed(ctx,", "= self.bot.get_user(int(num)) # get the user if they gave an ID else: clear_member", "await main.error_embed(ctx, 'The user you gave is either invalid or the name you", "messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared {int_num}", "await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author)", "1 if message.author == clear_member: int_num2 -= 1 if int_num2 == 0: break", "invalid or the name you gave is not a member') else: async for", 
"await ctx.message.delete() # delete the command except discord.NotFound: # ignore error if it", "name you gave is not a member') else: async for message in ctx.channel.history(limit=None):", "user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if ctx.guild is None: await", "-= 1 if int_num2 == 0: break def member_check(m): return m.author == clear_member", "for the clear command.\"\"\" self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group)", "channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages", "{num2} messages in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You need to give", "in ctx.channel.history(limit=None): limit += 1 if message.author == clear_member: int_num2 -= 1 if", "a positive non zero number') else: int_num2 = int(num2) if int_num2 > 0:", "= '' if ctx.guild is None: await main.error_embed(ctx, 'You cannot use this command", "18) and (num.isdigit()): # make sure a number is given and its not", "main.error_embed(ctx, 'The user you gave is either invalid or the name you gave", "if (user_men == '') and (len(num) != 18) and (num.isdigit()): # make sure", "def member_check(m): return m.author == clear_member await ctx.channel.purge(limit=limit, check=member_check) try: await ctx.message.delete() #", "Help.clear(self, ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if ctx.guild", "clear_member) print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You", "self.bot = bot @commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx,", "cannot use this command in DMs') else: if (user_men == '') and (len(num)", "= 
ctx.guild.get_member_named(num) # get the member if they gave a name with/without discrimitor", "user if they gave an ID else: clear_member = ctx.guild.get_member_named(num) # get the", "limit += 1 if message.author == clear_member: int_num2 -= 1 if int_num2 ==", "cleared {num2} messages in {ctx.channel.id} from {clear_member.id}') else: await main.error_embed(ctx, 'You need to", "user if they mentioned elif (num.isdigit()) and (len(num) == 18): clear_member = self.bot.get_user(int(num))", "# get the user if they mentioned elif (num.isdigit()) and (len(num) == 18):", "number') else: int_num2 = int(num2) if int_num2 > 0: limit = 0 if", "user_men != '': clear_member = self.bot.get_user(int(user_men)) # get the user if they mentioned", "and (num.isdigit()): # make sure a number is given and its not an", "'': clear_member = self.bot.get_user(int(user_men)) # get the user if they mentioned elif (num.isdigit())", "else: async for message in ctx.channel.history(limit=None): limit += 1 if message.author == clear_member:", "ctx) try: user_men = str(ctx.message.raw_mentions[0]) except IndexError: user_men = '' if ctx.guild is", "main.error_embed(ctx, 'You need to give a positive non zero number') def setup(bot): bot.add_cog(Clear(bot))", "'The user you gave is either invalid or the name you gave is", "class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for the clear command.\"\"\" self.bot =", "'pg', 'purge']) @commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None): if num is None:", "= int(num) if int_num > 0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await", "with/without discrimitor if clear_member is None: await main.error_embed(ctx, 'The user you gave is", "# delete the command except discord.NotFound: # ignore error if it was already", "in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages in 
{ctx.channel.id} from", "is None: await main.error_embed(ctx, 'The user you gave is either invalid or the", "int_num2 > 0: limit = 0 if user_men != '': clear_member = self.bot.get_user(int(user_men))", "IndexError: user_men = '' if ctx.guild is None: await main.error_embed(ctx, 'You cannot use", "not an ID int_num = int(num) if int_num > 0: await ctx.channel.purge(limit=int_num) channel", "async for message in ctx.channel.history(limit=None): limit += 1 if message.author == clear_member: int_num2", "int_num = int(num) if int_num > 0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3))", "limit = 0 if user_men != '': clear_member = self.bot.get_user(int(user_men)) # get the", "given and its not an ID int_num = int(num) if int_num > 0:", "'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author) print(f'{ctx.author.id} cleared", "import main from discord.ext import commands from cogs.help import Help class Clear(commands.Cog): def", "@commands.check(main.mod_group) async def clear(self, ctx, num=None, num2=None): if num is None: return await", "!= '': clear_member = self.bot.get_user(int(user_men)) # get the user if they mentioned elif", "is not a member') else: async for message in ctx.channel.history(limit=None): limit += 1", "'You need to give a positive non zero number') else: int_num2 = int(num2)", "a member') else: async for message in ctx.channel.history(limit=None): limit += 1 if message.author", "the user if they gave an ID else: clear_member = ctx.guild.get_member_named(num) # get", "+= 1 if message.author == clear_member: int_num2 -= 1 if int_num2 == 0:", "if int_num2 == 0: break def member_check(m): return m.author == clear_member await ctx.channel.purge(limit=limit,", "main.error_embed(ctx, 'You need to give a positive non zero number') else: int_num2 =", "deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, 
ctx.author) print(f'{ctx.author.id} cleared {int_num} messages", "ID else: clear_member = ctx.guild.get_member_named(num) # get the member if they gave a", "== '') and (len(num) != 18) and (num.isdigit()): # make sure a number", "self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel,", "the command except discord.NotFound: # ignore error if it was already deleted pass", "num=None, num2=None): if num is None: return await Help.clear(self, ctx) try: user_men =", "if int_num > 0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk", "mentioned elif (num.isdigit()) and (len(num) == 18): clear_member = self.bot.get_user(int(num)) # get the", "{clear_member.mention}', channel, clear_member) print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id} from {clear_member.id}') else: await", "if they mentioned elif (num.isdigit()) and (len(num) == 18): clear_member = self.bot.get_user(int(num)) #", "else: await main.error_embed(ctx, 'You need to give a positive non zero number') def", "try: await ctx.message.delete() # delete the command except discord.NotFound: # ignore error if", "> 0: await ctx.channel.purge(limit=int_num) channel = await self.bot.fetch_channel(main.ids(3)) await main.log_embed(None, 'Bulk messages deleted',", "gave is either invalid or the name you gave is not a member')", "1 if int_num2 == 0: break def member_check(m): return m.author == clear_member await", "for message in ctx.channel.history(limit=None): limit += 1 if message.author == clear_member: int_num2 -=", "discord.ext import commands from cogs.help import Help class Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns", "Clear(commands.Cog): def __init__(self, bot): \"\"\"Returns embeds for the clear command.\"\"\" self.bot = bot", "number is given and its not an ID int_num = int(num) if 
int_num" ]
[ "%database,]) ) def SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as connection: with connection.cursor()", "d ) return lst # return [{k[0]: v for k, v in zip(", "cursor.execute(query) try: rows = cursor.fetchall() except: rows = [] description = cursor.description #[col[0]", "zip( description, row )} for row in rows] #%% if __name__ == \"__main__\":", "import pyodbc from collections import OrderedDict #%% def GetConnection(server, database): return pyodbc.connect( ''.join(", "[dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit) from pandas import DataFrame df =", "row in rows: d = OrderedDict() for k, v in zip( description, row", "connection: with connection.cursor() as cursor: cursor.execute(query) try: rows = cursor.fetchall() except: rows =", ") def SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as connection: with connection.cursor() as", "for k, v in zip( description, row )} for row in rows] #%%", "13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def SQLExec(query, server,", "try: rows = cursor.fetchall() except: rows = [] description = cursor.description #[col[0] for", "dataBase, commit): with GetConnection(server,dataBase) as connection: with connection.cursor() as cursor: cursor.execute(query) try: rows", "utf-8 -*- \"\"\" Created on Fri Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\"", "as cursor: cursor.execute(query) try: rows = cursor.fetchall() except: rows = [] description =", "return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;'", "#[col[0] for col in cursor.description] if commit: connection.commit() lst = [] for row", "SQLExec(query, server, dataBase, commit) from pandas import DataFrame df = DataFrame( lst_of_dicts )", "= [] for row in rows: d = OrderedDict() for k, v in", "pyodbc.connect( ''.join( [r'DRIVER={ODBC 
Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,])", "#%% if __name__ == \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0 query =", "def GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for SQL Server};', r'Trusted_Connection=yes;',", "in rows] #%% if __name__ == \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0", "0 query = \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server,", "\"\"\" Created on Fri Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc", "a.mohammadi \"\"\" import pyodbc from collections import OrderedDict #%% def GetConnection(server, database): return", "query = \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase,", "cursor.description] if commit: connection.commit() lst = [] for row in rows: d =", "\"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10 *", "row ): d[k[0]] = v lst.append( d ) return lst # return [{k[0]:", "database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server,", "'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query,", "col in cursor.description] if commit: connection.commit() lst = [] for row in rows:", "in zip( description, row )} for row in rows] #%% if __name__ ==", "= SQLExec(query, server, dataBase, commit) from pandas import DataFrame df = DataFrame( lst_of_dicts", "description, row ): d[k[0]] = v lst.append( d ) return lst # return", "FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit) from pandas import DataFrame df", "GetConnection(server,dataBase) as connection: with connection.cursor() as cursor: cursor.execute(query) try: rows = cursor.fetchall() except:", "r'DATABASE=%s;' %database,]) ) def 
SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as connection: with", "in cursor.description] if commit: connection.commit() lst = [] for row in rows: d", "coding: utf-8 -*- \"\"\" Created on Fri Mar 15 14:52:34 2019 @author: a.mohammadi", "= cursor.description #[col[0] for col in cursor.description] if commit: connection.commit() lst = []", "for k, v in zip( description, row ): d[k[0]] = v lst.append( d", "commit): with GetConnection(server,dataBase) as connection: with connection.cursor() as cursor: cursor.execute(query) try: rows =", "2019 @author: a.mohammadi \"\"\" import pyodbc from collections import OrderedDict #%% def GetConnection(server,", "v lst.append( d ) return lst # return [{k[0]: v for k, v", "in zip( description, row ): d[k[0]] = v lst.append( d ) return lst", "TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit) from pandas", "cursor.fetchall() except: rows = [] description = cursor.description #[col[0] for col in cursor.description]", "Created on Fri Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc from", "k, v in zip( description, row ): d[k[0]] = v lst.append( d )", ") return lst # return [{k[0]: v for k, v in zip( description,", "lst_of_dicts = SQLExec(query, server, dataBase, commit) from pandas import DataFrame df = DataFrame(", "SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def SQLExec(query, server, dataBase, commit):", "pyodbc from collections import OrderedDict #%% def GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC", "15 14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc from collections import OrderedDict #%%", "= cursor.fetchall() except: rows = [] description = cursor.description #[col[0] for col in", "): d[k[0]] = v lst.append( d ) return lst # return [{k[0]: v", "with connection.cursor() as cursor: cursor.execute(query) try: rows = cursor.fetchall() except: rows 
= []", "\"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit) from", "for col in cursor.description] if commit: connection.commit() lst = [] for row in", "[{k[0]: v for k, v in zip( description, row )} for row in", "@author: a.mohammadi \"\"\" import pyodbc from collections import OrderedDict #%% def GetConnection(server, database):", "= [] description = cursor.description #[col[0] for col in cursor.description] if commit: connection.commit()", "lst # return [{k[0]: v for k, v in zip( description, row )}", "[] for row in rows: d = OrderedDict() for k, v in zip(", "description, row )} for row in rows] #%% if __name__ == \"__main__\": server,", "# return [{k[0]: v for k, v in zip( description, row )} for", "commit: connection.commit() lst = [] for row in rows: d = OrderedDict() for", "__name__ == \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP", "def SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as connection: with connection.cursor() as cursor:", "rows = [] description = cursor.description #[col[0] for col in cursor.description] if commit:", "Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc from collections import OrderedDict", "server, dataBase, commit): with GetConnection(server,dataBase) as connection: with connection.cursor() as cursor: cursor.execute(query) try:", "v for k, v in zip( description, row )} for row in rows]", "row in rows] #%% if __name__ == \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc',", "Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def SQLExec(query, server, dataBase, commit): with", "d = OrderedDict() for k, v in zip( description, row ): d[k[0]] =", "import OrderedDict #%% def GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for", "description = cursor.description #[col[0] for col in cursor.description] if 
commit: connection.commit() lst =", "rows = cursor.fetchall() except: rows = [] description = cursor.description #[col[0] for col", "OrderedDict() for k, v in zip( description, row ): d[k[0]] = v lst.append(", "[] description = cursor.description #[col[0] for col in cursor.description] if commit: connection.commit() lst", "Fri Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc from collections import", "connection.cursor() as cursor: cursor.execute(query) try: rows = cursor.fetchall() except: rows = [] description", "if commit: connection.commit() lst = [] for row in rows: d = OrderedDict()", "dataBase, commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\"", "server, dataBase, commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10 * FROM", "OrderedDict #%% def GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for SQL", "SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as connection: with connection.cursor() as cursor: cursor.execute(query)", "rows: d = OrderedDict() for k, v in zip( description, row ): d[k[0]]", ")} for row in rows] #%% if __name__ == \"__main__\": server, dataBase, commit", "-*- coding: utf-8 -*- \"\"\" Created on Fri Mar 15 14:52:34 2019 @author:", "for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def SQLExec(query, server, dataBase,", "14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc from collections import OrderedDict #%% def", "from collections import OrderedDict #%% def GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver", "zip( description, row ): d[k[0]] = v lst.append( d ) return lst #", "lst.append( d ) return lst # return [{k[0]: v for k, v in", "%server, r'DATABASE=%s;' %database,]) ) def SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as connection:", "#%% def GetConnection(server, database): 
return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for SQL Server};',", "v in zip( description, row ): d[k[0]] = v lst.append( d ) return", "''.join( [r'DRIVER={ODBC Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) )", "in rows: d = OrderedDict() for k, v in zip( description, row ):", "collections import OrderedDict #%% def GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13", "return [{k[0]: v for k, v in zip( description, row )} for row", "= 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts =", "connection.commit() lst = [] for row in rows: d = OrderedDict() for k,", "on Fri Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\" import pyodbc from collections", "if __name__ == \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT", "lst = [] for row in rows: d = OrderedDict() for k, v", "with GetConnection(server,dataBase) as connection: with connection.cursor() as cursor: cursor.execute(query) try: rows = cursor.fetchall()", "r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase) as", "[r'DRIVER={ODBC Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def", "rows] #%% if __name__ == \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0 query", "= v lst.append( d ) return lst # return [{k[0]: v for k,", "except: rows = [] description = cursor.description #[col[0] for col in cursor.description] if", "for row in rows] #%% if __name__ == \"__main__\": server, dataBase, commit =", "= \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit)", "for row in rows: d = OrderedDict() for k, v in zip( description,", "r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def 
SQLExec(query, server, dataBase, commit): with GetConnection(server,dataBase)", "as connection: with connection.cursor() as cursor: cursor.execute(query) try: rows = cursor.fetchall() except: rows", "cursor: cursor.execute(query) try: rows = cursor.fetchall() except: rows = [] description = cursor.description", "-*- \"\"\" Created on Fri Mar 15 14:52:34 2019 @author: a.mohammadi \"\"\" import", "k, v in zip( description, row )} for row in rows] #%% if", "d[k[0]] = v lst.append( d ) return lst # return [{k[0]: v for", "= OrderedDict() for k, v in zip( description, row ): d[k[0]] = v", "cursor.description #[col[0] for col in cursor.description] if commit: connection.commit() lst = [] for", "== \"__main__\": server, dataBase, commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10", "GetConnection(server, database): return pyodbc.connect( ''.join( [r'DRIVER={ODBC Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;'", "commit = 'AMESYD03','SafeEc', 0 query = \"\"\"SELECT TOP 10 * FROM [dbo].[Company]\"\"\" lst_of_dicts", "\"\"\" import pyodbc from collections import OrderedDict #%% def GetConnection(server, database): return pyodbc.connect(", "* FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit) from pandas import DataFrame", "# -*- coding: utf-8 -*- \"\"\" Created on Fri Mar 15 14:52:34 2019", "10 * FROM [dbo].[Company]\"\"\" lst_of_dicts = SQLExec(query, server, dataBase, commit) from pandas import", "Driver 13 for SQL Server};', r'Trusted_Connection=yes;', r'SERVER=%s;' %server, r'DATABASE=%s;' %database,]) ) def SQLExec(query,", "row )} for row in rows] #%% if __name__ == \"__main__\": server, dataBase,", "return lst # return [{k[0]: v for k, v in zip( description, row", "v in zip( description, row )} for row in rows] #%% if __name__" ]
[ "* `T` is the sequence length, `B` is the batch size. * `H`", "return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations:", "channel dimension (e.g., colour, stack). * `T` and `B` can be statically unknown.", "cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards, gamma):", "torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:, :-1]", "H / cell_size, W / cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size,", "-(shape[0] - new_shape[0] - margin3_top) margin4_top = (shape[1] - new_shape[1]) // 2 margin4_bottom", "autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if output_size is None: new_shape = tuple(map(lambda", "colour, stack). * `T` and `B` can be statically unknown. 
cell_size: The size", "with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards == 0", "with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:,", "cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values = base_value if i + 1 ==", "output_size=None): shape = observations.size()[3:] if output_size is None: new_shape = tuple(map(lambda x: (x", "cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions,", "done, stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0", "F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space", "q_actions) return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device)", "of shape `[B,T+1,C,H,W]`, where * `T` is the sequence length, `B` is the", "W / cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff =", ":, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A tensor of", "* `C...` is at least one channel dimension (e.g., colour, stack). 
* `T`", "autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape =", "target[:, 1] = rewards > 0 target[:, 2] = rewards < 0 return", "target[:, 0] = rewards == 0 target[:, 1] = rewards > 0 target[:,", "- new_shape[1]) // 2 margin4_bottom = -(shape[1] - new_shape[1] - margin4_top) if margin3_bottom", "margin4_bottom == 0: margin4_bottom = None return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def", "sequence length, `B` is the batch size. * `H` is height, `W` is", "size of each cell. Returns: shape (B, T, 1, H / cell_size, W", "/ cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:,", "one channel dimension (e.g., colour, stack). * `T` and `B` can be statically", "action): observation, reward, done, stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32)", "return self.observation(self.env.reset()) def step(self, action): observation, reward, done, stats = self.env.step(action) self.last_action_reward =", "torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def", "if i + 1 == max_t else cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma,", "observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:])", "= observations.size()[3:] if output_size is None: new_shape = tuple(map(lambda x: (x // cell_size)", "= self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] =", "i + 1 == max_t else 
cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values)", "cell_size, output_size)) margin3_top = (shape[0] - new_shape[0]) // 2 margin3_bottom = -(shape[0] -", "abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size)", "= action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards", "== 0: margin3_bottom = None if margin4_bottom == 0: margin4_bottom = None return", "A tensor of shape `[B,T+1,C,H,W]`, where * `T` is the sequence length, `B`", "max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values = base_value if i +", "output_size)) margin3_top = (shape[0] - new_shape[0]) // 2 margin3_bottom = -(shape[0] - new_shape[0]", "cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple((", "np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation, reward, done, stats", "keepdim=True)[0] for i in reversed(range(T)): previous_rewards = last_rewards if i + 1 ==", "in reversed(range(T)): previous_rewards = last_rewards if i + 1 == T else pseudo_rewards[:,", "margin3_top = (shape[0] - new_shape[0]) // 2 margin3_bottom = -(shape[0] - new_shape[0] -", "rewards, gamma): base_value = values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma)", "loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0]", "self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = 
np.clip(reward,", ":-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards): with", "dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation, reward, done, stats = self.env.step(action) self.last_action_reward", "* cell_size, output_size)) margin3_top = (shape[0] - new_shape[0]) // 2 margin3_bottom = -(shape[0]", "cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values = base_value", "1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation), reward,", "cell_size: The size of each cell. Returns: shape (B, T, 1, H /", "avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape", "previous_rewards) q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions", "target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1] for i", "import numpy as np def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if output_size", "self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32)", "rewards): with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards ==", "height, `W` is width. 
* `C...` is at least one channel dimension (e.g.,", "> 0 target[:, 2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards,", "return cummulative_reward def value_loss(values, rewards, gamma): base_value = values[:, -1] with torch.no_grad(): cummulative_reward", "= last_rewards if i + 1 == T else pseudo_rewards[:, i + 1]", "values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward)", "action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T =", "of each cell. Returns: shape (B, T, 1, H / cell_size, W /", "pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A tensor of shape `[B,T+1,C,H,W]`, where *", "last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards = last_rewards if", "__init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n", "else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1,", "self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation, reward,", "action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return", "1 == max_t else cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward", "return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape = 
action_values.size()", "def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if output_size is None: new_shape =", "= gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) )) def reset(self): self.last_action_reward", "dtype=torch.float32, device=predictions.device) target[:, 0] = rewards == 0 target[:, 1] = rewards >", "avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9,", "(shape[1] - new_shape[1]) // 2 margin4_bottom = -(shape[1] - new_shape[1] - margin4_top) if", "`W` is width. * `C...` is at least one channel dimension (e.g., colour,", "None: new_shape = tuple(map(lambda x: (x // cell_size) * cell_size, shape)) else: new_shape", "== 0: margin4_bottom = None return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations,", "rewards > 0 target[:, 2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def", "torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards == 0 target[:,", "is the sequence length, `B` is the batch size. * `H` is height,", "gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) )) def reset(self): self.last_action_reward =", "length, `B` is the batch size. * `H` is height, `W` is width.", "gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T = observations.size()[1]", "= cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values = base_value if i + 1", "(x // cell_size) * cell_size, shape)) else: new_shape = tuple(map(lambda x: x *", "least one channel dimension (e.g., colour, stack). 
* `T` and `B` can be", "if margin3_bottom == 0: margin3_bottom = None if margin4_bottom == 0: margin4_bottom =", "numpy as np def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if output_size is", "torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards =", "1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards, gamma): base_value = values[:,", "stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1]", "loss = F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target =", "def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A tensor of shape `[B,T+1,C,H,W]`, where", "as F import gym import gym.spaces import numpy as np def autocrop_observations(observations, cell_size,", "keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape =", "2 margin3_bottom = -(shape[0] - new_shape[0] - margin3_top) margin4_top = (shape[1] - new_shape[1])", "margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A tensor of shape `[B,T+1,C,H,W]`,", "Args: observations: A tensor of shape `[B,T+1,C,H,W]`, where * `T` is the sequence", "cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards =", "observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = 
F.avg_pool2d(abs_diff,", "F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1] for", "reversed(range(max_t)): next_values = base_value if i + 1 == max_t else cummulative_reward[:, i", "observation, reward, done, stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action]", "+ avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape =", "base_value, gamma): cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values", "margin3_bottom = None if margin4_bottom == 0: margin4_bottom = None return observations[:, :,", "0] = rewards == 0 target[:, 1] = rewards > 0 target[:, 2]", "new_shape[1]) // 2 margin4_bottom = -(shape[1] - new_shape[1] - margin4_top) if margin3_bottom ==", "can be statically unknown. cell_size: The size of each cell. Returns: shape (B,", "at least one channel dimension (e.g., colour, stack). 
* `T` and `B` can", "1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i", "UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0,", "max_t else cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values,", "= avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4):", "self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation), reward, done, stats def observation(self, observation):", "- new_shape[0]) // 2 margin3_bottom = -(shape[0] - new_shape[0] - margin3_top) margin4_top =", "discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env)", "= observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1,", "= F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = torch.zeros(predictions.size(),", "''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] -", "None if margin4_bottom == 0: margin4_bottom = None return observations[:, :, :, margin3_top:margin3_bottom,", "shape `[B,T+1,C,H,W]`, where * `T` is the sequence length, `B` is the batch", "action_value_shape = action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T = observations.size()[1] - 1", "gamma) 
return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward =", "observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A", "(shape[0] - new_shape[0]) // 2 margin3_bottom = -(shape[0] - new_shape[0] - margin3_top) margin4_top", "target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards == 0 target[:, 1]", "gym.spaces import numpy as np def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if", "rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone()", "+ 1 == T else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions", "be statically unknown. cell_size: The size of each cell. 
Returns: shape (B, T,", "-1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards = last_rewards if i + 1", "== 0 target[:, 1] = rewards > 0 target[:, 2] = rewards <", "def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2] with", "margin3_top) margin4_top = (shape[1] - new_shape[1]) // 2 margin4_bottom = -(shape[1] - new_shape[1]", "= F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def", "= None if margin4_bottom == 0: margin4_bottom = None return observations[:, :, :,", "action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss", "super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,),", "margin4_bottom = None return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None):", "= values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1],", "env): super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n +", "shape)) else: new_shape = tuple(map(lambda x: x * cell_size, output_size)) margin3_top = (shape[0]", "margin4_top = (shape[1] - new_shape[1]) // 2 margin4_bottom = -(shape[1] - new_shape[1] -", "new_shape = tuple(map(lambda x: (x // cell_size) * cell_size, shape)) else: new_shape =", "= -(shape[1] - new_shape[1] - margin4_top) if margin3_bottom == 0: margin3_bottom = None", "= None return observations[:, :, :, margin3_top:margin3_bottom, 
margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): '''", "(env.action_space.n + 1,), dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32)", "(B, T, 1, H / cell_size, W / cell_size) ''' with torch.no_grad(): observations", "import torch import torch.nn.functional as F import gym import gym.spaces import numpy as", "q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss def", "device=predictions.device) target[:, 0] = rewards == 0 target[:, 1] = rewards > 0", "np.clip(reward, -1, 1) return self.observation(observation), reward, done, stats def observation(self, observation): return (observation,", "= rewards > 0 target[:, 2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target)", "== max_t else cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def", "def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1] for i in", "= torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards == 0 target[:, 1] =", "T else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape +", "avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape", "-1, 1) return self.observation(observation), reward, done, stats def observation(self, observation): return (observation, self.last_action_reward)", "cell_size=4, output_size=None): ''' Args: observations: A tensor of shape `[B,T+1,C,H,W]`, where * `T`", "= discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) 
class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env):", "class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space,", "and `B` can be statically unknown. cell_size: The size of each cell. Returns:", "q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions =", "self.observation(self.env.reset()) def step(self, action): observation, reward, done, stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n", "the batch size. * `H` is height, `W` is width. * `C...` is", "/ cell_size, W / cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size)", "step(self, action): observation, reward, done, stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1,", "cell_size) * cell_size, shape)) else: new_shape = tuple(map(lambda x: x * cell_size, output_size))", "i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards, gamma): base_value = values[:, -1] with", "= abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return", "actions, action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T", "obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff", "i + 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards, gamma): base_value", "margin4_bottom = -(shape[1] - new_shape[1] - margin4_top) if margin3_bottom == 0: margin3_bottom =", "gym import gym.spaces import numpy 
as np def autocrop_observations(observations, cell_size, output_size=None): shape =", "torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards):", "is the batch size. * `H` is height, `W` is width. * `C...`", "abs_observation_diff = observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff =", "reward, done, stats = self.env.step(action) self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] =", "base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward", "`T` and `B` can be statically unknown. cell_size: The size of each cell.", "is at least one channel dimension (e.g., colour, stack). * `T` and `B`", "= actions.size()[:2] with torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size,", "else: new_shape = tuple(map(lambda x: x * cell_size, output_size)) margin3_top = (shape[0] -", "i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1,", "observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0]", "return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward = None", "output_size=None): ''' Args: observations: A tensor of shape `[B,T+1,C,H,W]`, where * `T` is", "where * `T` is the sequence length, `B` is the batch size. *", "`H` is height, `W` is width. 
* `C...` is at least one channel", "observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_()", "< 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone() max_t", "= base_value if i + 1 == max_t else cummulative_reward[:, i + 1]", "- new_shape[0] - margin3_top) margin4_top = (shape[1] - new_shape[1]) // 2 margin4_bottom =", "`T` is the sequence length, `B` is the batch size. * `H` is", "x * cell_size, output_size)) margin3_top = (shape[0] - new_shape[0]) // 2 margin3_bottom =", "with torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards", "None return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args:", "each cell. Returns: shape (B, T, 1, H / cell_size, W / cell_size)", "* `H` is height, `W` is width. * `C...` is at least one", "1.0, (env.action_space.n + 1,), dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1,", "i + 1 == T else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards)", "cell_size, output_size=None): shape = observations.size()[3:] if output_size is None: new_shape = tuple(map(lambda x:", "batch size. * `H` is height, `W` is width. 
* `C...` is at", "abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True)", "// cell_size) * cell_size, shape)) else: new_shape = tuple(map(lambda x: x * cell_size,", "+ 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards, gamma): base_value =", "for i in reversed(range(T)): previous_rewards = last_rewards if i + 1 == T", "1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss", "reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation,", "= (shape[0] - new_shape[0]) // 2 margin3_bottom = -(shape[0] - new_shape[0] - margin3_top)", "def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action):", "2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad():", "`[B,T+1,C,H,W]`, where * `T` is the sequence length, `B` is the batch size.", "Returns: shape (B, T, 1, H / cell_size, W / cell_size) ''' with", "import torch.nn.functional as F import gym import gym.spaces import numpy as np def", "new_shape[0]) // 2 margin3_bottom = -(shape[0] - new_shape[0] - margin3_top) margin4_top = (shape[1]", "= tuple(map(lambda x: (x // cell_size) * cell_size, shape)) else: new_shape = tuple(map(lambda", "0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone() max_t =", "tensor of shape `[B,T+1,C,H,W]`, where * `T` is the sequence length, `B` is", "discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = 
rewards.clone() max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)):", "in reversed(range(max_t)): next_values = base_value if i + 1 == max_t else cummulative_reward[:,", "0 target[:, 1] = rewards > 0 target[:, 2] = rewards < 0", "batch_shape = actions.size()[:2] with torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations,", "- 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for", "gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n +", "margin3_bottom = -(shape[0] - new_shape[0] - margin3_top) margin4_top = (shape[1] - new_shape[1]) //", "// 2 margin4_bottom = -(shape[1] - new_shape[1] - margin4_top) if margin3_bottom == 0:", "pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards", "def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] =", "1] = rewards > 0 target[:, 2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions,", "= abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff =", "value_loss(values, rewards, gamma): base_value = values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value,", "base_value = values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:,", "np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = 
np.clip(reward, -1, 1) return", "cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:]", "pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i in", "cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T = observations.size()[1] -", "with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper):", "new_shape[0] - margin3_top) margin4_top = (shape[1] - new_shape[1]) // 2 margin4_bottom = -(shape[1]", "observations.size()[3:] if output_size is None: new_shape = tuple(map(lambda x: (x // cell_size) *", "(e.g., colour, stack). * `T` and `B` can be statically unknown. cell_size: The", "statically unknown. cell_size: The size of each cell. 
Returns: shape (B, T, 1,", "x: x * cell_size, output_size)) margin3_top = (shape[0] - new_shape[0]) // 2 margin3_bottom", "= 1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation), reward, done, stats def", "import gym.spaces import numpy as np def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:]", "stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values,", "previous_rewards = last_rewards if i + 1 == T else pseudo_rewards[:, i +", "= np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1)", "import gym import gym.spaces import numpy as np def autocrop_observations(observations, cell_size, output_size=None): shape", "None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) )) def", "+ 1 == max_t else cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values) return", "i in reversed(range(T)): previous_rewards = last_rewards if i + 1 == T else", "1, H / cell_size, W / cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations,", "== T else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape", "= -(shape[0] - new_shape[0] - margin3_top) margin4_top = (shape[1] - new_shape[1]) // 2", "= observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1,", "output_size is None: new_shape = tuple(map(lambda x: (x // cell_size) * cell_size, shape))", "return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = 
torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:,", "- new_shape[1] - margin4_top) if margin3_bottom == 0: margin3_bottom = None if margin4_bottom", "tuple(map(lambda x: (x // cell_size) * cell_size, shape)) else: new_shape = tuple(map(lambda x:", "torch.nn.functional as F import gym import gym.spaces import numpy as np def autocrop_observations(observations,", "pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3],", "new_shape[1] - margin4_top) if margin3_bottom == 0: margin3_bottom = None if margin4_bottom ==", "margin3_bottom == 0: margin3_bottom = None if margin4_bottom == 0: margin4_bottom = None", "F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32,", "= np.clip(reward, -1, 1) return self.observation(observation), reward, done, stats def observation(self, observation): return", "= rewards == 0 target[:, 1] = rewards > 0 target[:, 2] =", "shape (B, T, 1, H / cell_size, W / cell_size) ''' with torch.no_grad():", "last_rewards if i + 1 == T else pseudo_rewards[:, i + 1] pseudo_rewards[:,", "cell. Returns: shape (B, T, 1, H / cell_size, W / cell_size) '''", "is None: new_shape = tuple(map(lambda x: (x // cell_size) * cell_size, shape)) else:", "`B` can be statically unknown. cell_size: The size of each cell. Returns: shape", "dimension (e.g., colour, stack). * `T` and `B` can be statically unknown. cell_size:", "self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) )) def reset(self):", "T = observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:,", "`B` is the batch size. * `H` is height, `W` is width. 
*", "pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2] with torch.no_grad():", "1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards,", "shape = observations.size()[3:] if output_size is None: new_shape = tuple(map(lambda x: (x //", "target[:, 2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma):", "reversed(range(T)): previous_rewards = last_rewards if i + 1 == T else pseudo_rewards[:, i", "abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1,", "- margin4_top) if margin3_bottom == 0: margin3_bottom = None if margin4_bottom == 0:", "self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = np.clip(reward, -1,", "if i + 1 == T else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma,", "base_value if i + 1 == max_t else cummulative_reward[:, i + 1] cummulative_reward[:,", "next_values = base_value if i + 1 == max_t else cummulative_reward[:, i +", "size. * `H` is height, `W` is width. 
* `C...` is at least", "1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff", "= pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)):", "*obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] +", "avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:])", "actions.size()[:2] with torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:])", "= None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) ))", "i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4])", "1 == T else pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions =", "if margin4_bottom == 0: margin4_bottom = None return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom]", "as np def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if output_size is None:", "= action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards = last_rewards if i", "0: margin3_bottom = None if margin4_bottom == 0: margin4_bottom = None return observations[:,", "reward_prediction_loss(predictions, rewards): with torch.no_grad(): target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards", "= 
actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:,", "self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation), reward, done, stats", "tuple(map(lambda x: x * cell_size, output_size)) margin3_top = (shape[0] - new_shape[0]) // 2", "* `T` and `B` can be statically unknown. cell_size: The size of each", "2 margin4_bottom = -(shape[1] - new_shape[1] - margin4_top) if margin3_bottom == 0: margin3_bottom", "abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2]", "output_size=action_values.size()[-2:]) last_rewards = action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards = last_rewards", "// 2 margin3_bottom = -(shape[0] - new_shape[0] - margin3_top) margin4_top = (shape[1] -", "if output_size is None: new_shape = tuple(map(lambda x: (x // cell_size) * cell_size,", "cell_size, shape)) else: new_shape = tuple(map(lambda x: x * cell_size, output_size)) margin3_top =", "is height, `W` is width. 
* `C...` is at least one channel dimension", "cell_size, W / cell_size) ''' with torch.no_grad(): observations = autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff", "-1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) class", "1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss =", "dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation), reward, done,", "1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions)", "torch import torch.nn.functional as F import gym import gym.spaces import numpy as np", ":-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff = F.avg_pool2d(abs_diff, cell_size,", "= (shape[1] - new_shape[1]) // 2 margin4_bottom = -(shape[1] - new_shape[1] - margin4_top)", "+ 1,), dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return", "= tuple(map(lambda x: x * cell_size, output_size)) margin3_top = (shape[0] - new_shape[0]) //", "action_values.size() batch_shape = actions.size()[:2] with torch.no_grad(): T = observations.size()[1] - 1 pseudo_rewards =", "+ 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation, reward, done, stats =", "F import gym import gym.spaces import numpy as np def autocrop_observations(observations, cell_size, output_size=None):", "is width. 
* `C...` is at least one channel dimension (e.g., colour, stack).", "* cell_size, shape)) else: new_shape = tuple(map(lambda x: x * cell_size, output_size)) margin3_top", "unknown. cell_size: The size of each cell. Returns: shape (B, T, 1, H", "avg_abs_diff.size()[1:]) def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4): action_value_shape = action_values.size() batch_shape = actions.size()[:2]", ")) def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self,", "+ 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1,", "margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A tensor of shape", "-(shape[1] - new_shape[1] - margin4_top) if margin3_bottom == 0: margin3_bottom = None if", "0: margin4_bottom = None return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4,", "= torch.gather(action_values[:, :-1], 2, q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions,", "= np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation, reward, done,", "+ (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2,", "np def autocrop_observations(observations, cell_size, output_size=None): shape = observations.size()[3:] if output_size is None: new_shape", "+ 1, dtype=np.float32) self.last_action_reward[action] = 1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation),", "def step(self, action): observation, reward, done, stats = self.env.step(action) self.last_action_reward = 
np.zeros(self.action_space.n +", "= autocrop_observations(observations, cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape", "next_values) return cummulative_reward def value_loss(values, rewards, gamma): base_value = values[:, -1] with torch.no_grad():", "q_actions) loss = F.mse_loss(pseudo_rewards, q_actions) return loss def reward_prediction_loss(predictions, rewards): with torch.no_grad(): target", "gamma): cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values =", "= rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward =", "observations: A tensor of shape `[B,T+1,C,H,W]`, where * `T` is the sequence length,", "1.0 self.last_action_reward[-1] = np.clip(reward, -1, 1) return self.observation(observation), reward, done, stats def observation(self,", "(1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1], 2, q_actions)", "T, 1, H / cell_size, W / cell_size) ''' with torch.no_grad(): observations =", "- observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff = abs_observation_diff.view(-1, *obs_shape[2:]) avg_abs_diff =", "torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device) target[:, 0] = rewards == 0 target[:, 1] = rewards", "rewards == 0 target[:, 1] = rewards > 0 target[:, 2] = rewards", "`C...` is at least one channel dimension (e.g., colour, stack). 
* `T` and", ":-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space =", "cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards, gamma): base_value = values[:, -1]", "1,), dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset())", "cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return F.mse_loss(values[:, :-1], cummulative_reward) class UnrealEnvBaseWrapper(gym.Wrapper): def __init__(self,", "F.avg_pool2d(abs_diff, cell_size, stride=cell_size) avg_abs_diff = avg_abs_diff.mean(1, keepdim=True) return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:]) def pixel_control_loss(observations,", "return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward = rewards.clone() max_t = cummulative_reward.size()[1]", "2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value, gamma): cummulative_reward", "pseudo_rewards[:, i + 1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1, 1,", "= rewards.clone() max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values = base_value if", "the sequence length, `B` is the batch size. 
* `H` is height, `W`", "- margin3_top) margin4_top = (shape[1] - new_shape[1]) // 2 margin4_bottom = -(shape[1] -", "0 target[:, 2] = rewards < 0 return F.binary_cross_entropy_with_logits(predictions, target) def discounted_commulative_reward(rewards, base_value,", "else cummulative_reward[:, i + 1] cummulative_reward[:, i].add_(gamma, next_values) return cummulative_reward def value_loss(values, rewards,", "dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32) return self.observation(self.env.reset()) def", "new_shape = tuple(map(lambda x: x * cell_size, output_size)) margin3_top = (shape[0] - new_shape[0])", "cell_size, output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size()", "actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4]) q_actions = torch.gather(action_values[:, :-1],", "env.observation_space, gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32) )) def reset(self): self.last_action_reward = np.zeros(self.action_space.n", "margin4_top) if margin3_bottom == 0: margin3_bottom = None if margin4_bottom == 0: margin4_bottom", "output_size=output_size) abs_observation_diff = observations[:, 1:] - observations[:, :-1] abs_observation_diff.abs_() obs_shape = abs_observation_diff.size() abs_diff", "The size of each cell. Returns: shape (B, T, 1, H / cell_size,", ":, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom] def pixel_control_reward(observations, cell_size=4, output_size=None): ''' Args: observations: A tensor", "gamma): base_value = values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma) return", "width. * `C...` is at least one channel dimension (e.g., colour, stack). 
*", "x: (x // cell_size) * cell_size, shape)) else: new_shape = tuple(map(lambda x: x", "1] pseudo_rewards[:, i].add_(gamma, previous_rewards) q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1,", "def __init__(self, env): super().__init__(env) self.last_action_reward = None self.observation_space = gym.spaces.Tuple(( env.observation_space, gym.spaces.Box(0.0, 1.0,", "action_values[:, -1].max(1, keepdim=True)[0] for i in reversed(range(T)): previous_rewards = last_rewards if i +", "for i in reversed(range(max_t)): next_values = base_value if i + 1 == max_t", "''' Args: observations: A tensor of shape `[B,T+1,C,H,W]`, where * `T` is the", "stack). * `T` and `B` can be statically unknown. cell_size: The size of", "cummulative_reward def value_loss(values, rewards, gamma): base_value = values[:, -1] with torch.no_grad(): cummulative_reward =", "def value_loss(values, rewards, gamma): base_value = values[:, -1] with torch.no_grad(): cummulative_reward = discounted_commulative_reward(rewards,", "1, dtype=np.float32) return self.observation(self.env.reset()) def step(self, action): observation, reward, done, stats = self.env.step(action)", "i in reversed(range(max_t)): next_values = base_value if i + 1 == max_t else", "rewards.clone() max_t = cummulative_reward.size()[1] for i in reversed(range(max_t)): next_values = base_value if i" ]
[ "experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\",", "None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE +", "FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args,", "[fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", }", "responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted is None", "status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True,", "\"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of", "project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new", "responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1,", "@responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\",", "x1=1, x2=2, y1=3, y2=4, 
fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" )", "json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add(", "FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id", "def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201,", "y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True assert ( json.loads(responses.calls[2].request.body)[\"fcsFileId\"] ==", "+ f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4,", "FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of a new fcs_file. 
This", "\"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is", "file = FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files):", "responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id) assert type(file)", "@responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of a", "FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client,", "test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], )", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, )", "x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"]", "json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert", "ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], )", "'fcs_file_id'.\" ): 
experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True,", "== { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1,", "the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args,", "\"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID],", "@responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\",", "must be run from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\",", "f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\",", "upload of a new fcs_file. 
This test must be run from the project", "\"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]),", "f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True,", "\"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def", "directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\",", "fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE +", "test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of a new fcs_file.", "] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test", "f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE,", "name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out \"seed\"", "x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"]", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = 
FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile", "responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True,", "), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate", "json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\",", "FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new file\",", "True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out \"seed\" to test param not", "\"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1", "\"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True", "ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1,", "f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"],", "assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add(", "): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\",", 
"f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\",", ") responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\",", "@responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of a new fcs_file. This test", "fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE +", "FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args(", "tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files,", "json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE", ") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\":", "rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\",", "@responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( 
responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\",", ") assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id()", "y2=4, tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate", "is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE", ") @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]],", "responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body)", "y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid", "\"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params", "assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST,", "\"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload", "f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, 
FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, )", "file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of a", "= generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\",", "gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE,", "test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of a new fcs_file. This test must be", "+ f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4,", "+ f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST,", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( ValueError, match=\"Please specify only 'fcs_file'", "} @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of a new", ") file = FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client,", "test must be run from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE +", "experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert 
json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"]", "\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE,", "client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of a new fcs_file. This test", "project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new", "fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\",", "of a new fcs_file. This test must be run from the project root", "directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert", "y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE", "[{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE,", "x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None(", "experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE", "file\") assert 
json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate def", "\"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate", "ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate(", "json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE,", "+ f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( ValueError, match=\"Please specify only 'fcs_file' or", "pytest import responses from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID =", "x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True assert ( json.loads(responses.calls[2].request.body)[\"fcsFileId\"]", "= FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted", "json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE,", "rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\",", "assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE +", 
"x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate):", "tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201,", "): \"\"\"Test upload of a new fcs_file. This test must be run from", "1, \"preSubsampleP\": 1 # leave out \"seed\" to test param not specified }", ") assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add(", "global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate,", "client, fcs_files): \"\"\"Test upload of a new fcs_file. 
This test must be run", "( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID:", "x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate):", "\"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out \"seed\" to test param", "experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, )", "json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST,", "([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files,", "deleted = fcs_file.delete() assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments,", ") responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\",", "+ f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\":", "def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of a new", "add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert 
json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\",", "[ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ),", ") experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"]", "y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment,", "json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, )", "fcs_file.delete() assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ):", "pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\":", "client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted =", "): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( ValueError, match=\"Please", "y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST,", "cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = 
\"5d64abe2ca9df61349ed8e7c\"", "fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete()", "import responses from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\"", "fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True assert ( json.loads(responses.calls[2].request.body)[\"fcsFileId\"] == \"5d64abe2ca9df61349ed8e7c\" )", "json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, )", "FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\",", "pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True,", "f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\",", "\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment,", "f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE", "fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + 
f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file =", "+ f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1,", "FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload", "[\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}],", "y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid =", "[FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID},", "name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\"", "\"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate", "= \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE", "[{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response", "experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) 
experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\",", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add(", "or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\",", "{ \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID],", "FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET,", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted is None @responses.activate def", "add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new", "json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert", ") experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is", "y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True assert ( json.loads(responses.calls[2].request.body)[\"fcsFileId\"] == \"5d64abe2ca9df61349ed8e7c\"", "import pytest import responses from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID", "pytest.raises( ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ): 
experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\",", "= fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id)", "test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of a new fcs_file. This test", "responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) ==", "global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\",", "def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", )", "([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID:", "y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE,", "json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, )", "fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id) assert", "\"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True", 
"be run from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1],", "\"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out", "\"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\",", "status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert", "responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], )", "[\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), (", "def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate(", ") @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate,", "@pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload", "test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, )", "ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of a 
new fcs_file. This", "== { \"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client,", "from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE,", "x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate", "assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add(", "\"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params)", "status=201, json=gates[0], ) with pytest.raises( ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ):", "+ f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate(", "x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE,", "generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\",", "tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert 
json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def", "\"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]),", "EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == {", ") assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate):", "+ f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined(", "match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2,", "from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID,", "json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\":", "out \"seed\" to test param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files):", "name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert", "json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\":", "responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + 
f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3],", "1 # leave out \"seed\" to test param not specified } @responses.activate def", "fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of a new fcs_file. This test must", "rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate(", "@responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], )", "leave out \"seed\" to test param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client,", "ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of a new fcs_file. This test must", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == {", "\"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave", "client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file", "\"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\",", ") FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new", "specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE", "file_id = fcs_files[0][\"_id\"] 
responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID,", "\"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add(", "responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id) assert type(file) is", "FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted is", "This test must be run from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE", "f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 )", "True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE", "\"new file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\",", "json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE", "expected_response ): \"\"\"Test upload of a new fcs_file. 
This test must be run", "json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\",", "responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1,", "assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args(", "fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert", "def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add(", "} @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE +", "\"preSubsampleP\": 1 # leave out \"seed\" to test param not specified } @responses.activate", "f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True,", "import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate", "file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params =", ") assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert 
json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment,", "client, fcs_files ): \"\"\"Test upload of a new fcs_file. This test must be", "{ \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\":", "y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate", "params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ): \"\"\"Test upload of", "= fcs_file.delete() assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates", "\"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID:", "FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def test_should_create_fcs_file_and_correctly_parse_fcs_file_args( ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response ):", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1,", "x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True assert (", "( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET,", "def 
test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate,", "root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\")", "file_id) assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of", "rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\",", "a new fcs_file. This test must be run from the project root directory\"\"\"", "responses from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID", "experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"]", "fcs_files): \"\"\"Test upload of a new fcs_file. This test must be run from", "{ \"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files", "+ f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\":", "params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\",", "fcs_file. 
This test must be run from the project root directory\"\"\" responses.add( responses.POST,", "directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert", "status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True,", "None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE +", "not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE,", "+ f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate", "project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID,", "[\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ]", "x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"] ==", "generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def", "rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\",", "json=rectangle_gate, ) 
experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"]", "assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add(", "type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of a new", "\"\"\"Test upload of a new fcs_file. This test must be run from the", "responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( ValueError, match=\"Please specify", "the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID,", "responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted = fcs_file.delete() assert deleted is None @responses.activate", "with pytest.raises( ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\",", "responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body)", "\"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE +", "EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id =", "specify only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3,", "expected_response, 
\"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test", "to test param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file =", "from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(", "is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE +", "\"seed\" to test param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file", "experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate", "cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client,", "fcs_files ): \"\"\"Test upload of a new fcs_file. 
This test must be run", "status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid,", "x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is", "import json import pytest import responses from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import", "responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) ==", "is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE", "experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert (", "def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of a new fcs_file. 
This", "y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def", "'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\",", "= FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test", "assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params = [", ") with pytest.raises( ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\",", "True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out \"seed\" to test", "responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2,", "test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\",", "import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files):", "run from the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], )", "\"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE,", "fcs_file_args, expected_response ): 
\"\"\"Test upload of a new fcs_file. This test must be", "root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new", "json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\":", "test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\",", "[FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 #", "\"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out \"seed\" to test param not specified", "gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( ValueError,", "root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\")", "@responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201,", "} params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\",", "\"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate", "responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( ValueError, match=\"Please specify only", "y1=3, y2=4 ) assert 
json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid", "experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with", "[FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]),", "experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises(", "param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add(", "responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate,", "test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0],", "== global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201,", "def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate(", "experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is", "f\"/experiments/{EXP_ID}/gates\", status=201, json=gates[0], ) with pytest.raises( 
ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\"", "y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] ==", "\"fcsFiles\": expected_response, \"filename\": \"new file\", } @responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ):", "from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile EXP_ID = \"5d38a6f79fae87499999a74b\" FCSFILE_ID =", "tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ):", ") deleted = fcs_file.delete() assert deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment,", "fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response, \"filename\": \"new file\", }", "@responses.activate def test_should_create_fcs_file_and_correctly_parse_body_args( ENDPOINT_BASE, client, fcs_files ): \"\"\"Test upload of a new fcs_file.", "assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate):", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == {", "file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"],", "y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) 
assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True", "@responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, )", "+ f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4", "is True assert json.loads(responses.calls[0].request.body)[\"gid\"] == global_gid @responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST,", "responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201,", ") FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body)", "x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None @responses.activate def test_create_global_tailored_gate(ENDPOINT_BASE, experiment,", "# leave out \"seed\" to test param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE,", "responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2,", ") experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert", "new fcs_file. 
This test must be run from the project root directory\"\"\" responses.add(", ") experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, gid=global_gid, ) assert", "json import pytest import responses from cellengine.utils.generate_id import generate_id from cellengine.resources.fcs_file import FcsFile", "({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]), ([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]), ] @pytest.mark.parametrize(\"fcs_file_args,expected_response\", params) @responses.activate def", "fcs_file_id=\"5d38a7159fae87499999a751\", tailored_per_file=True, ) assert ( json.loads(responses.calls[0].request.body)[\"fcsFileId\"] == \"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment,", "json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params = [ (FCSFILE_ID,", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( x_channel=\"FSC-A\", y_channel=\"FSC-W\", name=\"fcs_rect_gate\", x1=1, x2=2, y1=3,", "(FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"], ), ({EXP_ID:", "\"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def", "\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, ) assert json.loads(responses.calls[0].request.body)[\"tailoredPerFile\"] is True @responses.activate def", "test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=[fcs_files[3]], ) responses.add( responses.GET,", "== 
\"5d38a7159fae87499999a751\" ) @responses.activate def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate): responses.add( responses.GET, ENDPOINT_BASE +", "experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4 ) assert json.loads(responses.calls[0].request.body)[\"fcsFileId\"] is None", "x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, ) @responses.activate def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate): responses.add(", "deleted is None @responses.activate def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST,", "\"filename\": \"new file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\",", "test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0]) responses.add( responses.DELETE, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}\", ) deleted", "\"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert json.loads(responses.calls[2].request.body)[\"tailoredPerFile\"] is True assert", "f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0], ) file = FcsFile.get(EXP_ID, file_id) assert type(file) is FcsFile @responses.activate def", "test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate): global_gid = generate_id() responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate,", "only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4,", "FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == { 
\"fcsFiles\":", "== { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new file\", } params = [ (FCSFILE_ID, [FCSFILE_ID]),", "json=gates[0], ) with pytest.raises( ValueError, match=\"Please specify only 'fcs_file' or 'fcs_file_id'.\" ): experiment.create_rectangle_gate(", "status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, tailored_per_file=True, )", "is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of a new fcs_file.", ") experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", tailored_per_file=True, ) assert", "True @responses.activate def test_fcs_file_id_is_None_and_fcs_file_is_None( ENDPOINT_BASE, experiment, rectangle_gate ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\",", "= \"5d38a6f79fae87499999a74b\" FCSFILE_ID = \"5d64abe2ca9df61349ed8e7c\" @responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"]", "\"addFileNumber\": True, \"addEventNumber\": True, \"preSubsampleN\": 1, \"preSubsampleP\": 1 # leave out \"seed\" to", ") FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]], \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [\"5d64abe2ca9df61349ed8e79\"], \"filename\": \"new", "@responses.activate def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, )", "@responses.activate def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\",", "= [ (FCSFILE_ID, [FCSFILE_ID]), ([FCSFILE_ID], [FCSFILE_ID]), ( [\"fcs_file_id_1\", \"fcs_file_id_2\", 
\"fcs_file_id_3\"], [\"fcs_file_id_1\", \"fcs_file_id_2\", \"fcs_file_id_3\"],", "test param not specified } @responses.activate def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files): fcs_file = FcsFile.from_dict(fcs_files[0])", "): experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3, y2=4, fcs_file=\"Specimen_001_A1_A01.fcs\", fcs_file_id=\"5d38a7159fae87499999a74e\", tailored_per_file=True, )", "ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\", \"fcs_rect_gate\", x1=1, x2=2, y1=3,", "assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\": \"new name\", \"addFileNumber\": True, \"addEventNumber\": True,", "def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files): \"\"\"Test upload of a new fcs_file. This test must", "experiment, rectangle_gate): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201, json=rectangle_gate, ) experiment.create_rectangle_gate( \"FSC-A\", \"FSC-W\",", "json=[fcs_files[3]], ) responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c\", json=fcs_files[3], ) responses.add( responses.POST, ENDPOINT_BASE +", "FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) ==", "def test_fcs_file_and_fcs_file_id_defined( ENDPOINT_BASE, experiment, experiments, gates ): responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/gates\", status=201,", "responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create( EXP_ID, FCSFILE_ID, \"new name\", add_file_number=True,", "assert type(file) is FcsFile @responses.activate def test_should_create_fcs_file(ENDPOINT_BASE, client, 
fcs_files): \"\"\"Test upload of a", "f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, fcs_file_args, \"new file\") assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": expected_response,", "the project root directory\"\"\" responses.add( responses.POST, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles\", json=fcs_files[1], ) FcsFile.create(EXP_ID, [fcs_files[0][\"_id\"]],", "def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files): file_id = fcs_files[0][\"_id\"] responses.add( responses.GET, ENDPOINT_BASE + f\"/experiments/{EXP_ID}/fcsfiles/{file_id}\", json=fcs_files[0],", "name\", add_file_number=True, add_event_number=True, pre_subsample_n=1, pre_subsample_p=1, ) assert json.loads(responses.calls[0].request.body) == { \"fcsFiles\": [FCSFILE_ID], \"filename\":" ]
[ "i] theta = todegree(angle(x, x_des)) # print (theta) if theta > 90: theta", "des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3) init_orient = np.asarray([1,0,0]) theta, index, sign = closest_axis_2_userdefined( to_H(current_orient),", "now apply rotation: second_goal = np.matmul(des_rotation, second_goal) # now rotate back to orientation", "R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0, 0, 1]), 0.] else: rot_to_g =", "the two vectors. The rotation angle is the arccosine of the dot product", "second_goal) # now rotate back to orientation that we are now at: second_goal", "1 else: rot_over = 0 return rot_over, angle else: rot_over = 0 while", "the matrix with the rotation axis defined as the cross product of the", ":-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0, 0, 1]),", "vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin / np.linalg.norm(vector_fin) # The rotation", "now at: second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal) second_goal = Rotation.from_matrix(second_goal) goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]]) #addition of", "to rotate from one vector to another. For the rotation of one vector", "array @param vector_orig: The unrotated vector defined in the reference frame. @type vector_orig:", "current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign", "into account the desired rotation from the target information: des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3)", "numpy array @param vector_orig: The unrotated vector defined in the reference frame. @type", "\"\"\"Calculate the rotation matrix required to rotate from one vector to another. 
For", "= y * sa + (1.0 - ca) * x * z R[1,", "numpy as np import pybullet as p def todegree(w): return w*180/np.pi def torad(w):", "self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0, sign * 1]) R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1,", "the reference frame. @type vector_fin: numpy array, len 3 \"\"\" # Convert the", "if np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]), 0.] else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g =", "a unit vector parallel to the rotation axis, w = [x, y, z]", "R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1", "axis / axis_len # Alias the axis coordinates. x = axis[0] y =", "the axis-angle convention will be used to construct the matrix with the rotation", "1] = x * sa + (1.0 - ca) * y * z", "= np.eye(4) H[:-1,:-1] = R H[:-1,-1] = T return H def closest_axis_2_userdefined(H, vec):", "closest_axis_2_userdefined(H, vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array(vec) index", "if x==0 and y==0 and z==0: z=1 # The rotation angle. angle =", "0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w = np.zeros(3) r_vec_via12w[2] = np.sign(r_vec_via12_p[2])", "H_via2[:-1, 1] else: vec_1 = H_via1[:-1, 0] vec_2 = H_via2[:-1, 0] R12, r_vec_via12_p,", "-x * sa + (1.0 - ca) * y * z R[2, 0]", "R_2vect(current_orient[:, index], des_vec) second_goal = np.matmul(Rot1[:-1, :-1], current_orient) # now apply rotation: second_goal", "theta ==0: reverse = True if min_angle > np.abs(theta): min_angle = np.abs(theta) index", "y * z R[2, 2] = 1.0 + (1.0 - ca) * (z", "np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0, 0, 1]), 0.] 
else: rot_to_g", "H[:-1,-1] = T return H def closest_axis_2_userdefined(H, vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle", "achieve target ## ################## Goal to Viapoint 2 ################################### theta, index, sign =", "need to do this maths once!). ca = np.cos(angle) sa = np.sin(angle) R", "observation!!! # now take into account the desired rotation from the target information:", "Sequence will provide rotation vector and desired rotation to achieve target ## ##################", "= self.R_2vect(vec_1, vec_2) r_vec_via12w = np.zeros(3) r_vec_via12w[2] = np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w)", "ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix())", "- 1.0) R[1, 2] = -x * sa + (1.0 - ca) *", "+ (1-cos(a))*(z*z-1) | @param R: The 3x3 rotation matrix to update. 
@type R:", "to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor = np.zeros(3) r_vec_floor[index]", "to Viapoint 2 ################################### theta, index, sign = self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0,", "[r_vec_via2g, -ang_via, r_vec_via2gw] ######################################################################### ############# From Floor to Viapoint 1 #################### index_H0, sign_H0", "* sa + (1.0 - ca) * x * y R[1, 1] =", "to Viapoint 2 ################ if index == 0: vec_1 = H_via1[:-1, 1] vec_2", "[rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from start to Via-2 ## R_via2", "i in range(3): z = H[2, i] # print(z) if np.abs(z) > big_Z:", "find_index_z(self, H): big_Z = 0.0 index = 0 for i in range(3): z", "######################################################################### ############# From Floor to Viapoint 1 #################### index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0", "= H_via1[:-1, 1] vec_2 = H_via2[:-1, 1] else: vec_1 = H_via1[:-1, 0] vec_2", "np.array(vec) index = 0 sign = 0 reverse = False for i in", "- ca) * (x ** 2 - 1.0) R[0, 1] = -z *", "def __init__(self, H0, Hg): self.H0 = H0 self.Hg = Hg def set_goal(self, Hg):", "index == 0: rot_over = 1 else: rot_over = 0 return rot_over, angle", "= True if min_angle > np.abs(theta): min_angle = np.abs(theta) index = i if", "np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from start to Via-2 ## R_via2 = H_via2[:-1,:-1]", "matrix elements. R[0, 0] = 1.0 + (1.0 - ca) * (x **", "applies the relative transformation which is desired based on the current observation!!! 
#", "H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w = np.zeros(3) r_vec_via12w[2] =", "= np.asarray([1,0,0]) theta, index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign *", "def closest_axis_2_normal(self, H): # print (H) # print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des", "sa + (1.0 - ca) * y * z R[2, 2] = 1.0", "used to construct the matrix with the rotation axis defined as the cross", "np.linalg.norm(rot_to_2, ord=2)] ##### Compute rotation from start to Goal ### R_g = self.Hg[:-1,", "second_goal = Rotation.from_matrix(second_goal) goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]]) #addition of noise needed since otherwise problems code,... return", "else: angle = np.pi / 2 if add_one(rot_over) != index_H0: angle = -angle", "190 x_des = np.array(vec) index = 0 sign = 0 reverse = False", "to another. For the rotation of one vector to another, there are an", "if index == index_H0: if sign == sign_H0: return None, None else: angle", "z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |", "vector defined in the reference frame. @type vector_orig: numpy array, len 3 @param", "2 - 1.0) R[1, 2] = -x * sa + (1.0 - ca)", "np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]), 0.] 
else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g", "H0, Hg): self.H0 = H0 self.Hg = Hg def set_goal(self, Hg): self.Hg =", "(y ** 2 - 1.0) R[1, 2] = -x * sa + (1.0", "to construct the matrix with the rotation axis defined as the cross product", "i return index, sign def find_rot_z(self, index_H0, sign_H0, index, sign): if index ==", "axis can be any vector lying in the symmetry plane between the two", "rotation angle a, the rotation matrix R is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y", "else: index_out = index+1 return index_out def to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1]", "= np.cos(angle) sa = np.sin(angle) R = np.eye(4) # Calculate the rotation matrix", "R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 -", "+ (1.0 - ca) * x * z R[2, 1] = x *", "index == index_H0: if sign == sign_H0: return None, None else: angle =", "Compute rotation from start to Goal ### R_g = self.Hg[:-1, :-1] R_init =", "- ca) * y * z R[2, 0] = -y * sa +", "= H[:-1, i] theta = todegree(angle(x, x_des)) #print (theta) if theta > 90:", "= 0 while (rot_over == index or rot_over == index_H0): rot_over += 1", "########################################################### ##### COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g =", "numpy array, len 3 \"\"\" # Convert the vectors to unit vectors. 
vector_orig", "Viapoint 1 to Viapoint 2 ################ if index == 0: vec_1 = H_via1[:-1,", "we are now at: second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal) second_goal = Rotation.from_matrix(second_goal) goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]])", "= H0 def get_control_seq(self, ax=None): ## Control Sequence will provide rotation vector and", "##### COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g)", "-y * sa + (1.0 - ca) * x * z R[2, 1]", "arccosine of the dot product of the two unit vectors. Given a unit", "the rotation matrix elements. R[0, 0] = 1.0 + (1.0 - ca) *", "angle is the arccosine of the dot product of the two unit vectors.", "= -angle return rot_over, angle def closest_axis_2_normal(self, H): # print (H) # print", "rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 =", "ca) * y * z R[2, 0] = -y * sa + (1.0", "rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign) if rot_index is not None: r_vec_floor", "==0: reverse = True if min_angle > np.abs(theta): min_angle = np.abs(theta) index =", "else: r_vec_floor = np.zeros(3) r_vec_floor[index] = 1 ang_floor = 0. H_via1 = self.H0", "0.] 
else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute", "np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From Viapoint 1 to", "ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w = np.zeros(3) r_vec_via12w[2] = np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T,", "np.linalg.norm(vector_orig) vector_fin = vector_fin / np.linalg.norm(vector_fin) # The rotation axis (normalised). axis =", "= 1 ang_floor = 0. H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01", "sign def find_rot_z(self, index_H0, sign_H0, index, sign): if index == index_H0: if sign", "goal orientation for some axis alignment init_orient = np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient", "theta, index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1,", "axis[2] if x==0 and y==0 and z==0: z=1 # The rotation angle. angle", "of the dot product of the two unit vectors. Given a unit vector", "= sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) second_goal =", "(z ** 2 - 1.0) return R, axis, angle class RotationPrimitives(): def __init__(self,", "two vectors. 
Hence the axis-angle convention will be used to construct the matrix", "axis-angle convention will be used to construct the matrix with the rotation axis", "True if min_angle > np.abs(theta): min_angle = np.abs(theta) index = i if theta", "or rot_over == index_H0): rot_over += 1 if sign == sign_H0: angle =", "(1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |", "= todegree(angle(x, x_des)) #print (theta) if theta > 90: theta = theta -", "axis = axis / axis_len # Alias the axis coordinates. x = axis[0]", "Hg): self.Hg = Hg def set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self, ax=None): ##", "= np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient / np.linalg.norm(init_orient) current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3,", "sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) second_goal = np.matmul(Rot1[:-1,", "len 3 \"\"\" # Convert the vectors to unit vectors. vector_orig = vector_orig", "= np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3) init_orient = np.asarray([1,0,0]) theta, index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient)", "-1 else: sign = 1 else: sign = np.sign(theta) return min_angle, index, sign", "None, None else: angle = np.pi if index == 0: rot_over = 1", "which is desired based on the current observation!!! # now take into account", "transformation which is desired based on the current observation!!! # now take into", "to Viapoint 1 #################### index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0)", "/ np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from start to Via-2 ## R_via2 =", "the current observation!!! 
# now take into account the desired rotation from the", "vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array(vec) index =", "= H0 self.Hg = Hg def set_goal(self, Hg): self.Hg = Hg def set_current_pose(self,H0):", "if sign == sign_H0: angle = -np.pi / 2 if add_one(rot_over) != index_H0:", "is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1", "index == 0: vec_1 = H_via1[:-1, 1] vec_2 = H_via2[:-1, 1] else: vec_1", "1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1", "= unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def unit_vector(vector): \"\"\" Returns the unit", "np.abs(theta) index = i if theta == 0.: if reverse: sign = -1", "= self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index, sign) # input (\"WAIT\") rot_index, ang_floor", "defined as the cross product of the two vectors. The rotation angle is", "(1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R: The", "H0 self.Hg = Hg def set_goal(self, Hg): self.Hg = Hg def set_current_pose(self,H0): self.H0", "sa + (1.0 - ca) * y * z R[2, 0] = -y", "rotation: second_goal = np.matmul(des_rotation, second_goal) # now rotate back to orientation that we", "vector_fin: The rotated vector defined in the reference frame. @type vector_fin: numpy array,", "(1.0 - ca) * x * y R[1, 1] = 1.0 + (1.0", "= Hg def set_goal(self, Hg): self.Hg = Hg def set_current_pose(self,H0): self.H0 = H0", "if min_angle > np.abs(theta): min_angle = np.abs(theta) index = i if theta ==", "= np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0, 0, 1]), 0.] else:", "the vectors to unit vectors. 
vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin", "+ (1.0 - ca) * x * y R[1, 1] = 1.0 +", "if index == 0: rot_over = 1 else: rot_over = 0 return rot_over,", "not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor)", "self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)):", "the cross product of the two vectors. The rotation angle is the arccosine", "np.asarray([1,0,0]) theta, index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient)", "1]) index = 0 sign = 0 reverse = False for i in", "def find_index_z(self, H): big_Z = 0.0 index = 0 for i in range(3):", "== 0: rot_over = 1 else: rot_over = 0 return rot_over, angle else:", "/ np.linalg.norm(vector) def add_one(index): if index+1 == 3: index_out = 0 else: index_out", "1 if sign == sign_H0: angle = -np.pi / 2 if add_one(rot_over) !=", "rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g", "0 sign = 0 reverse = False for i in range(3): x =", "-z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | |", "R_2vect(vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate from one vector to", "if theta ==0: reverse = True if min_angle > np.abs(theta): min_angle = np.abs(theta)", "des_vec) H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2, -1] = 0. 
r_vec_via2g", "= 1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1)", "symmetry, the rotation axis can be any vector lying in the symmetry plane", "if reverse: sign = -1 else: sign = 1 else: sign = np.sign(theta)", "H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2, -1] = 0. r_vec_via2g =", "and desired rotation to achieve target ## ################## Goal to Viapoint 2 ###################################", "current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since otherwise problems code,...", "ca) * x * z R[2, 1] = x * sa + (1.0", "target information: des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3) init_orient = np.asarray([1,0,0]) theta, index, sign =", "current observation!!! # now take into account the desired rotation from the target", "\"\"\" Returns the unit vector of the vector. \"\"\" return vector / np.linalg.norm(vector)", "np.allclose(R_to_2, np.eye(3)): c_to_2 = [np.array([0, 0, 1]), 0.] else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec() c_to_2", "index], des_vec) first_goal = np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of", "################## Goal to Viapoint 2 ################################### theta, index, sign = self.closest_axis_2_normal(self.Hg) des_vec =", "np.linalg.norm(rot_to_2, ord=2), np.linalg.norm(rot_to_2, ord=2)] ##### Compute rotation from start to Goal ### R_g", "the symmetry plane between the two vectors. 
Hence the axis-angle convention will be", "* x * z R[2, 1] = x * sa + (1.0 -", "= -1 else: sign = 1 else: sign = np.sign(theta) return min_angle, index,", "| | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R: The 3x3 rotation", "sign_H0, index, sign) if rot_index is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] =", "\"\"\" # Convert the vectors to unit vectors. vector_orig = vector_orig / np.linalg.norm(vector_orig)", "z==0: z=1 # The rotation angle. angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1)) # Trig functions", "some axis alignment init_orient = np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient /", "r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1]", "if axis_len != 0.0: axis = axis / axis_len # Alias the axis", "np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1)) # Trig functions (only need to do this maths once!). ca", "add_one(rot_over) != index_H0: angle = -angle return rot_over, angle def closest_axis_2_normal(self, H): #", "rotate back to orientation that we are now at: second_goal = np.matmul(Rot1[:-1, :-1].T,", "# Calculate the rotation matrix elements. R[0, 0] = 1.0 + (1.0 -", "return rot_over, angle else: rot_over = 0 while (rot_over == index or rot_over", "0. r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw] ######################################################################### ############# From", "while (rot_over == index or rot_over == index_H0): rot_over += 1 if sign", "index+1 return index_out def to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] = R H[:-1,-1]", "self.find_rot_z(index_H0, sign_H0, index, sign) if rot_index is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index]", "based on the current observation!!! 
# now take into account the desired rotation", "R_to_2 = np.matmul(R_init.T, R_via2) if np.allclose(R_to_2, np.eye(3)): c_to_2 = [np.array([0, 0, 1]), 0.]", "else: rot_over = 0 while (rot_over == index or rot_over == index_H0): rot_over", "[r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From Viapoint 1 to Viapoint 2 ################ if", "= H_via2[:-1, 1] else: vec_1 = H_via1[:-1, 0] vec_2 = H_via2[:-1, 0] R12,", "is desired based on the current observation!!! # now take into account the", "v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def unit_vector(vector): \"\"\" Returns the", "np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from start to Via-2 ## R_via2 = H_via2[:-1,:-1] R_init", "rotate from one vector to another. For the rotation of one vector to", "product of the two unit vectors. Given a unit vector parallel to the", "- 180 if theta ==0: reverse = True if min_angle > np.abs(theta): min_angle", "= 190 x_des = np.array([0, 0, 1]) index = 0 sign = 0", "update. 
@type R: 3x3 numpy array @param vector_orig: The unrotated vector defined in", "== sign_H0: angle = -np.pi / 2 if add_one(rot_over) != index_H0: angle =", ":-1].T, second_goal) second_goal = Rotation.from_matrix(second_goal) goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]]) #addition of noise needed since otherwise problems", "H0 def get_control_seq(self, ax=None): ## Control Sequence will provide rotation vector and desired", "T return H def closest_axis_2_userdefined(H, vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190", "> big_Z: big_Z = np.abs(z) sign = np.sign(z) index = i return index,", "np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient)", "2 - 1.0) return R, axis, angle def calculate_mutltiple_goals(init_information, obs): goal_orients = []", "angle = np.pi / 2 if add_one(rot_over) != index_H0: angle = -angle return", "add_one(index): if index+1 == 3: index_out = 0 else: index_out = index+1 return", "return H def closest_axis_2_userdefined(H, vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des", "#addition of noise needed since otherwise problems code,... # this second calculation applies", "self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index, sign) #", "The rotation axis (normalised). 
axis = np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis) if axis_len", "R, axis, angle def calculate_mutltiple_goals(init_information, obs): goal_orients = [] # This first calcuation", "= np.sign(z) index = i return index, sign def find_rot_z(self, index_H0, sign_H0, index,", "= closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via =", "def calculate_mutltiple_goals(init_information, obs): goal_orients = [] # This first calcuation step allows to", "!= index_H0: angle = -angle return rot_over, angle def closest_axis_2_normal(self, H): # print", "np.cos(angle) sa = np.sin(angle) R = np.eye(4) # Calculate the rotation matrix elements.", "# Convert the vectors to unit vectors. vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin", "index, sign def find_rot_z(self, index_H0, sign_H0, index, sign): if index == index_H0: if", "rotation axis (normalised). axis = np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis) if axis_len !=", "otherwise problems code,... 
# this second calculation applies the relative transformation which is", "= -z * sa + (1.0 - ca) * x * y R[0,", "Due to axially symmetry, the rotation axis can be any vector lying in", "target ## ################## Goal to Viapoint 2 ################################### theta, index, sign = self.closest_axis_2_normal(self.Hg)", "command_seq = [c01, c12,c2g] return command_seq, [c1g], [c_to_2, c_to_g] def find_index_z(self, H): big_Z", "1.0) return R, axis, angle class RotationPrimitives(): def __init__(self, H0, Hg): self.H0 =", "= 0 else: index_out = index+1 return index_out def to_H(R, T=np.zeros(3)): H =", "vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate from one vector to", "np.sign(theta) return min_angle, index, sign def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix", "the target information: des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3) init_orient = np.asarray([1,0,0]) theta, index, sign", "big_Z: big_Z = np.abs(z) sign = np.sign(z) index = i return index, sign", "back to orientation that we are now at: second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal)", "construct the matrix with the rotation axis defined as the cross product of", "as the cross product of the two vectors. The rotation angle is the", "H[:-1, i] theta = todegree(angle(x, x_des)) #print (theta) if theta > 90: theta", "np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]), 0.] else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g", "= 1 else: rot_over = 0 return rot_over, angle else: rot_over = 0", "__init__(self, H0, Hg): self.H0 = H0 self.Hg = Hg def set_goal(self, Hg): self.Hg", "reverse = False for i in range(3): x = H[:-1, i] theta =", "as np from scipy.spatial.transform import Rotation import numpy as np import pybullet as", "H_via2[2, -1] = 0. 
r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw]", "1.0 + (1.0 - ca) * (y ** 2 - 1.0) R[1, 2]", "theta > 90: theta = theta - 180 if theta ==0: reverse =", "of the two unit vectors. Given a unit vector parallel to the rotation", "r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw] ######################################################################### ############# From Floor to Viapoint 1", "np.eye(4)): c1g = [np.array([0,0,1]), 0.] else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g /", "ca) * x * y R[1, 1] = 1.0 + (1.0 - ca)", "return min_angle, index, sign def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required", "> 90: theta = theta - 180 if theta ==0: reverse = True", "x * sa + (1.0 - ca) * y * z R[2, 2]", "goal_orients = [] # This first calcuation step allows to calculate a viapoint!", "@type R: 3x3 numpy array @param vector_orig: The unrotated vector defined in the", "def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate from one", "#theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index, sign) # input", "= -angle else: angle = np.pi / 2 if add_one(rot_over) != index_H0: angle", "init_orient / np.linalg.norm(init_orient) current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient)", "in range(3): z = H[2, i] # print(z) if np.abs(z) > big_Z: big_Z", "# print (index_H0, sign_H0, index, sign) # input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0,", "== index_H0): rot_over += 1 if sign == sign_H0: angle = -np.pi /", "rotation axis defined as the cross product of the two vectors. 
The rotation", "rotation matrix R is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R =", "des_vec) first_goal = np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise", "of the vector. \"\"\" return vector / np.linalg.norm(vector) def add_one(index): if index+1 ==", "1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)", "** 2 - 1.0) return R, axis, angle class RotationPrimitives(): def __init__(self, H0,", "/ 2 if add_one(rot_over) != index_H0: angle = -angle return rot_over, angle def", "angle = np.pi if index == 0: rot_over = 1 else: rot_over =", "second_goal = np.matmul(Rot1[:-1, :-1], current_orient) # now apply rotation: second_goal = np.matmul(des_rotation, second_goal)", "R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor = np.zeros(3) r_vec_floor[index] = 1 ang_floor =", "= H[:-1, i] theta = todegree(angle(x, x_des)) # print (theta) if theta >", "np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor = np.zeros(3) r_vec_floor[index] = 1 ang_floor", "- ca) * x * z R[1, 0] = z * sa +", "= self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0,", "r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) first_goal = np.matmul(Rot1[:-1, :-1], current_orient) first_goal =", "## Control Sequence will provide rotation vector and desired rotation to achieve target", "= Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from start", "* 1]) R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 = np.matmul(R, self.Hg)", "ca = np.cos(angle) sa = np.sin(angle) R = np.eye(4) # Calculate the rotation", "any vector lying in the symmetry plane between the two vectors. 
Hence the", "R[0, 2] = y * sa + (1.0 - ca) * x *", "w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1) v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u),", "> np.abs(theta): min_angle = np.abs(theta) index = i if theta == 0.: if", "= [np.array([0,0,1]), 0.] else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)]", "to Goal ### R_g = self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1] R_to_g =", "def to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] = R H[:-1,-1] = T return", "sign = self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0, sign * 1]) R, r_vec_via2gw, ang_via", "R: The 3x3 rotation matrix to update. @type R: 3x3 numpy array @param", "reference frame. @type vector_orig: numpy array, len 3 @param vector_fin: The rotated vector", "R[2, 0] = -y * sa + (1.0 - ca) * x *", "provide rotation vector and desired rotation to achieve target ## ################## Goal to", "= index+1 return index_out def to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] = R", "init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)", "index = 0 sign = 0 reverse = False for i in range(3):", "vector_fin = vector_fin / np.linalg.norm(vector_fin) # The rotation axis (normalised). 
axis = np.cross(vector_orig,", "z R[2, 0] = -y * sa + (1.0 - ca) * x", "+ (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z", "= np.pi / 2 if add_one(rot_over) != index_H0: angle = -angle return rot_over,", "= np.array([0, 0, sign * 1]) R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec)", "return command_seq, [c1g], [c_to_2, c_to_g] def find_index_z(self, H): big_Z = 0.0 index =", "range(3): x = H[:-1, i] theta = todegree(angle(x, x_des)) #print (theta) if theta", "+= 1 if sign == sign_H0: angle = -np.pi / 2 if add_one(rot_over)", "is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor *", "sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via", "| -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R: The 3x3 rotation matrix", "The 3x3 rotation matrix to update. @type R: 3x3 numpy array @param vector_orig:", "0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)", "def torad(w): return w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1) v2_u = unit_vector(v2)", "= H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w = np.zeros(3) r_vec_via12w[2]", "second_goal = np.matmul(des_rotation, second_goal) # now rotate back to orientation that we are", "= T return H def closest_axis_2_userdefined(H, vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle =", "y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z", "= np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From Viapoint 1", "vector to another. 
def R_2vect(vector_orig, vector_fin):
    """Calculate the rotation matrix required to rotate from one vector to another.

    For the rotation of one vector to another there is an infinite series of
    rotation matrices possible.  Due to axial symmetry, the rotation axis can
    be any vector lying in the symmetry plane between the two vectors.  Hence
    the axis-angle convention is used to construct the matrix, with the
    rotation axis defined as the cross product of the two vectors and the
    rotation angle as the arccosine of their dot product.

    Given a unit vector parallel to the rotation axis, w = [x, y, z], and the
    rotation angle a, the rotation matrix R is::

            |  1 + (1-cos(a))*(x*x-1)   -z*sin(a)+(1-cos(a))*x*y   y*sin(a)+(1-cos(a))*x*z |
        R = |  z*sin(a)+(1-cos(a))*x*y   1 + (1-cos(a))*(y*y-1)   -x*sin(a)+(1-cos(a))*y*z |
            | -y*sin(a)+(1-cos(a))*x*z   x*sin(a)+(1-cos(a))*y*z   1 + (1-cos(a))*(z*z-1)  |

    @param vector_orig: The unrotated vector defined in the reference frame.
    @type vector_orig:  numpy array, len 3
    @param vector_fin:  The rotated vector defined in the reference frame.
    @type vector_fin:   numpy array, len 3
    @return:            tuple (R, axis, angle): R is the 4x4 homogeneous
                        rotation matrix, axis the unit rotation axis, and
                        angle the rotation angle in radians.
    """
    # Convert the vectors to unit vectors.
    vector_orig = vector_orig / np.linalg.norm(vector_orig)
    vector_fin = vector_fin / np.linalg.norm(vector_fin)

    # The rotation axis (normalised).
    axis = np.cross(vector_orig, vector_fin)
    axis_len = np.linalg.norm(axis)
    if axis_len != 0.0:
        axis = axis / axis_len
    else:
        # Degenerate case: the vectors are parallel or antiparallel, so the
        # cross product vanishes.  Any axis works for the parallel case
        # (angle == 0), but the antiparallel case (angle == pi) requires an
        # axis perpendicular to vector_orig.  (The previous hard-coded z-axis
        # fallback produced a wrong matrix whenever vector_orig was not
        # perpendicular to z, e.g. orig=[0,0,1] -> fin=[0,0,-1].)
        if abs(vector_orig[0]) > 0.9:
            helper = np.array([0.0, 1.0, 0.0])
        else:
            helper = np.array([1.0, 0.0, 0.0])
        axis = np.cross(vector_orig, helper)
        axis = axis / np.linalg.norm(axis)

    # Alias the axis coordinates.
    x = axis[0]
    y = axis[1]
    z = axis[2]

    # The rotation angle (dot product clamped against round-off).
    angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin), -1, 1))

    # Trig functions (only need to do this maths once!).
    ca = np.cos(angle)
    sa = np.sin(angle)

    # Calculate the rotation matrix elements (homogeneous 4x4).
    R = np.eye(4)
    R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)
    R[0, 1] = -z * sa + (1.0 - ca) * x * y
    R[0, 2] = y * sa + (1.0 - ca) * x * z
    R[1, 0] = z * sa + (1.0 - ca) * x * y
    R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)
    R[1, 2] = -x * sa + (1.0 - ca) * y * z
    R[2, 0] = -y * sa + (1.0 - ca) * x * z
    R[2, 1] = x * sa + (1.0 - ca) * y * z
    R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)
    return R, axis, angle
\"\"\" return vector / np.linalg.norm(vector) def add_one(index): if", "torad(w): return w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1) v2_u = unit_vector(v2) return", "ord=2), np.linalg.norm(rot_to_2, ord=2)] ##### Compute rotation from start to Goal ### R_g =", "2] = y * sa + (1.0 - ca) * x * z", "#print (theta) if theta > 90: theta = theta - 180 if theta", "index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via", "step allows to calculate a viapoint! I.e. it yields a goal orientation for", "For the rotation of one vector to another, there are an infinit series", "possible. Due to axially symmetry, the rotation axis can be any vector lying", "+ (1.0 - ca) * y * z R[2, 2] = 1.0 +", "# print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array([0, 0, 1]) index =", "des_vec) second_goal = np.matmul(Rot1[:-1, :-1], current_orient) # now apply rotation: second_goal = np.matmul(des_rotation,", "np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient / np.linalg.norm(init_orient) current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3)", "of one vector to another, there are an infinit series of rotation matrices", "sign = np.sign(theta) return min_angle, index, sign def R_2vect(vector_orig, vector_fin): \"\"\"Calculate the rotation", "vector lying in the symmetry plane between the two vectors. 
Hence the axis-angle", "0 reverse = False for i in range(3): x = H[:-1, i] theta", "* z R[1, 0] = z * sa + (1.0 - ca) *", "return index_out def to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] = R H[:-1,-1] =", "R_2vect(current_orient[:, index], des_vec) first_goal = np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition", "* y * z R[2, 2] = 1.0 + (1.0 - ca) *", "2 if add_one(rot_over) != index_H0: angle = -angle return rot_over, angle def closest_axis_2_normal(self,", "= Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since otherwise problems code,... # this", "= 1.0 + (1.0 - ca) * (y ** 2 - 1.0) R[1,", "ang_via = R_2vect(current_orient[:, index], des_vec) first_goal = np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal)", "# now rotate back to orientation that we are now at: second_goal =", "= i if theta == 0.: if reverse: sign = -1 else: sign", "if add_one(rot_over) != index_H0: angle = -angle return rot_over, angle def closest_axis_2_normal(self, H):", "1 #################### index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) # print", "3) init_orient = np.asarray([1,0,0]) theta, index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec =", "array, len 3 \"\"\" # Convert the vectors to unit vectors. vector_orig =", "0.0: axis = axis / axis_len # Alias the axis coordinates. 
x =", "= np.linalg.norm(axis) if axis_len != 0.0: axis = axis / axis_len # Alias", "def set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self, ax=None): ## Control Sequence will provide", "self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index, sign) # input (\"WAIT\") rot_index, ang_floor =", "* (z ** 2 - 1.0) return R, axis, angle class RotationPrimitives(): def", "first calcuation step allows to calculate a viapoint! I.e. it yields a goal", "1]), 0.] else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec() c_to_2 = [rot_to_2 / np.linalg.norm(rot_to_2, ord=2), np.linalg.norm(rot_to_2,", "for i in range(3): z = H[2, i] # print(z) if np.abs(z) >", "import numpy as np from scipy.spatial.transform import Rotation import numpy as np import", "min_angle > np.abs(theta): min_angle = np.abs(theta) index = i if theta == 0.:", "= sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) first_goal =", "x = H[:-1, i] theta = todegree(angle(x, x_des)) # print (theta) if theta", "the rotation matrix required to rotate from one vector to another. For the", "(1.0 - ca) * x * z R[2, 1] = x * sa", "the axis coordinates. x = axis[0] y = axis[1] z = axis[2] if", "| 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1 +", "\"\"\" return vector / np.linalg.norm(vector) def add_one(index): if index+1 == 3: index_out =", "product of the two vectors. The rotation angle is the arccosine of the", "vector_orig: The unrotated vector defined in the reference frame. @type vector_orig: numpy array,", "sa + (1.0 - ca) * x * z R[2, 1] = x", "frame. 
@type vector_fin: numpy array, len 3 \"\"\" # Convert the vectors to", ":-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since otherwise problems", "add_one(rot_over) != index_H0: angle = -angle else: angle = np.pi / 2 if", "= | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 +", "rotation to achieve target ## ################## Goal to Viapoint 2 ################################### theta, index,", "vector_fin: numpy array, len 3 \"\"\" # Convert the vectors to unit vectors.", "relative transformation which is desired based on the current observation!!! # now take", "(\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign) if rot_index is not None:", "* sa + (1.0 - ca) * x * z R[2, 1] =", "an infinit series of rotation matrices possible. Due to axially symmetry, the rotation", "(H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array(vec) index = 0 sign", "v2_u), -1.0, 1.0)) def unit_vector(vector): \"\"\" Returns the unit vector of the vector.", "= np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1)) # Trig functions (only need to do this maths once!).", "one vector to another. 
For the rotation of one vector to another, there", "that we are now at: second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal) second_goal = Rotation.from_matrix(second_goal)", "sa + (1.0 - ca) * x * y R[0, 2] = y", "0] vec_2 = H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w =", "range(3): x = H[:-1, i] theta = todegree(angle(x, x_des)) # print (theta) if", "in range(3): x = H[:-1, i] theta = todegree(angle(x, x_des)) #print (theta) if", "COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if", "index_out = index+1 return index_out def to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] =", "plane between the two vectors. Hence the axis-angle convention will be used to", "r_vec_via12w[2] = np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 = [r_vec_via12, ang_via12, r_vec_via12w] ###########################################################", "* sa + (1.0 - ca) * x * y R[0, 2] =", "else: rot_over = 0 return rot_over, angle else: rot_over = 0 while (rot_over", "index, sign) # input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign) if", "vector_fin) axis_len = np.linalg.norm(axis) if axis_len != 0.0: axis = axis / axis_len", "r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From Viapoint", "= self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index, sign)", "x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R: The 3x3 rotation matrix to update.", "np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis) if axis_len != 0.0: axis = axis /", "r_vec_floor_w] #################################################### ############ From Viapoint 1 
    def get_control_seq(self, ax=None):
        """Compute the axis/angle command sequence rotating the current pose
        ``self.H0`` into the goal pose ``self.Hg`` via two viapoints.

        @param ax: unused in this method (presumably a plotting axis for
                   debugging -- TODO confirm against callers).
        @return:   tuple (command_seq, [c1g], [c_to_2, c_to_g]); each command
                   in command_seq is [axis (body frame), angle, axis (world
                   frame)], the aggregated commands are [unit axis, angle].
        """
        ## Control Sequence will provide rotation vector and desired rotation to achieve target ##
        ################## Goal to Viapoint 2 ###################################
        # Find the goal-frame axis closest to the world z-axis and the rotation
        # aligning it with z; viapoint 2 is presumably the goal orientation set
        # down on the floor plane (z-translation forced to 0 below).
        theta, index, sign = self.closest_axis_2_normal(self.Hg)
        des_vec = np.array([0, 0, sign * 1])
        R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec)
        H_via2 = np.matmul(R, self.Hg)
        # Keep the goal translation but drop the pose onto the floor (z = 0).
        H_via2[:-1,-1] = self.Hg[:-1,-1]
        H_via2[2, -1] = 0.
        # Express the world-frame alignment axis in the viapoint-2 body frame.
        r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw)
        # Command viapoint 2 -> goal: inverse (negated angle) of the alignment.
        c2g = [r_vec_via2g, -ang_via, r_vec_via2gw]
        #########################################################################
        ############# From Floor to Viapoint 1 ####################
        # Which current-pose axis points along world z, and with which sign.
        index_H0, sign_H0 = self.find_index_z(self.H0)
        #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0)
        # print (index_H0, sign_H0, index, sign)
        # input ("WAIT")
        # Body-frame axis index and angle of the 90/180-degree roll that maps
        # the current up-axis onto the goal's up-axis.
        rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign)
        if rot_index is not None:
            r_vec_floor = np.zeros(3)
            r_vec_floor[rot_index] = 1
            rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor)
            R_floor_1 = rotation_floor.as_matrix()
            R_floor_1 = to_H(R=R_floor_1)
            # Apply the floor roll in the body frame of the current pose.
            H_via1 = np.matmul(self.H0, R_floor_1)
            #H_via1[1,-1] = 0.3
        else:
            # Up-axes already aligned: zero-angle placeholder command.
            r_vec_floor = np.zeros(3)
            r_vec_floor[index] = 1
            ang_floor = 0.
            H_via1 = self.H0
        # World-frame version of the body-frame floor-roll axis.
        r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor)
        c01 = [r_vec_floor, ang_floor, r_vec_floor_w]
        ####################################################
        ############ From Viapoint 1 to Viapoint 2 ################
        # Compare an axis orthogonal to the aligned one to extract the residual
        # yaw between the two viapoints.
        if index == 0:
            vec_1 = H_via1[:-1, 1]
            vec_2 = H_via2[:-1, 1]
        else:
            vec_1 = H_via1[:-1, 0]
            vec_2 = H_via2[:-1, 0]
        R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2)
        # Constrain the rotation axis to the world z direction (sign taken from
        # the raw axis so the turn direction is preserved).
        r_vec_via12w = np.zeros(3)
        r_vec_via12w[2] = np.sign(r_vec_via12_p[2])
        r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w)
        c12 = [r_vec_via12, ang_via12, r_vec_via12w]
        ###########################################################
        ##### COMPUTE SHORTCUT: ########
        # Compose viapoint1 -> viapoint2 -> goal into a single rotation c1g.
        rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix())
        rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix())
        rot1g = np.matmul(rot_12,rot2g)
        if np.allclose(rot1g, np.eye(4)):
            # No net rotation: dummy axis with zero angle.
            c1g = [np.array([0,0,1]), 0.]
        else:
            rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec()
            c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)]
        ##### Compute rotation from start to Via-2 ##
        # Relative rotation start -> viapoint 2, as (unit axis, angle).
        R_via2 = H_via2[:-1,:-1]
        R_init = self.H0[:-1,:-1]
        R_to_2 = np.matmul(R_init.T, R_via2)
        if np.allclose(R_to_2, np.eye(3)):
            c_to_2 = [np.array([0, 0, 1]), 0.]
        else:
            rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec()
            c_to_2 = [rot_to_2 / np.linalg.norm(rot_to_2, ord=2), np.linalg.norm(rot_to_2, ord=2)]
        ##### Compute rotation from start to Goal ###
        # Relative rotation start -> goal, as (unit axis, angle).
        R_g = self.Hg[:-1, :-1]
        R_init = self.H0[:-1, :-1]
        R_to_g = np.matmul(R_init.T, R_g)
        if np.allclose(R_to_g, np.eye(3)):
            c_to_g = [np.array([0, 0, 1]), 0.]
        else:
            rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec()
            c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)]
        command_seq = [c01, c12,c2g]
        return command_seq, [c1g], [c_to_2, c_to_g]
def calculate_mutltiple_goals(init_information, obs):
    """Compute two intermediate goal orientations (with small quaternion
    noise added) from the target specification and the current observation.

    NOTE(review): the function name keeps the original spelling
    ("mutltiple") so existing callers are unaffected.

    @param init_information: flat sequence of target data; indices [:2] give
        the desired in-plane alignment direction, [7], [8], [9] a goal
        position, and [10:14] a desired rotation quaternion (pybullet
        x, y, z, w order assumed -- TODO confirm against the caller).
    @param obs: observation dict; only obs["object_orientation"] (the
        object's orientation quaternion) is read here.
    @return: list of two [quaternion (+0.001 noise per component),
        position] goal entries.
    """
    goal_orients = []
    # This first calculation step allows to calculate a viapoint! I.e. it
    # yields a goal orientation for some axis alignment
    init_orient = np.zeros(3)
    init_orient[:2] = np.asarray(init_information[:2])
    init_orient = init_orient / np.linalg.norm(init_orient)
    current_orient = np.asarray(p.getMatrixFromQuaternion(obs["object_orientation"])).reshape(3, 3)
    # Object axis closest to the desired direction (sign-aware).
    theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient)
    des_vec = sign * np.array(init_orient)
    # Rotation mapping that object axis onto the desired direction.
    Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)
    first_goal = np.matmul(Rot1[:-1, :-1], current_orient)
    first_goal = Rotation.from_matrix(first_goal)
    goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since otherwise problems code,...
    # this second calculation applies the relative transformation which is desired based on the current observation!!!
    # now take into account the desired rotation from the target information:
    des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3)
    init_orient = np.asarray([1,0,0])
    # Re-anchor on the x-axis before applying the desired relative rotation.
    theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient)
    des_vec = sign * np.array(init_orient)
    Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)
    second_goal = np.matmul(Rot1[:-1, :-1], current_orient)
    # now apply rotation:
    second_goal = np.matmul(des_rotation, second_goal)
    # now rotate back to orientation that we are now at:
    second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal)
    second_goal = Rotation.from_matrix(second_goal)
    goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]]) #addition of noise needed since otherwise problems code,...
    return goal_orients
angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1))", "False for i in range(3): x = H[:-1, i] theta = todegree(angle(x, x_des))", "ca) * (x ** 2 - 1.0) R[0, 1] = -z * sa", "index, sign) if rot_index is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1", "matrix R is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = |", "H_via1[:-1, 0] vec_2 = H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w", "[np.array([0, 0, 1]), 0.] else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec() c_to_2 = [rot_to_2 / np.linalg.norm(rot_to_2,", "= to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]), 0.] else:", "vector defined in the reference frame. @type vector_fin: numpy array, len 3 \"\"\"", "sign): if index == index_H0: if sign == sign_H0: return None, None else:", "in the reference frame. @type vector_orig: numpy array, len 3 @param vector_fin: The", "= Hg def set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self, ax=None): ## Control Sequence", "else: sign = np.sign(theta) return min_angle, index, sign def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate", "once!). ca = np.cos(angle) sa = np.sin(angle) R = np.eye(4) # Calculate the", "ord=2)] ##### Compute rotation from start to Goal ### R_g = self.Hg[:-1, :-1]", "be used to construct the matrix with the rotation axis defined as the", "required to rotate from one vector to another. 
For the rotation of one", "self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0, 0,", "theta = todegree(angle(x, x_des)) # print (theta) if theta > 90: theta =", "(index_H0, sign_H0, index, sign) # input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index,", "H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor = np.zeros(3) r_vec_floor[index] =", "c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)] command_seq = [c01, c12,c2g] return", "R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] =", "rotation from start to Goal ### R_g = self.Hg[:-1, :-1] R_init = self.H0[:-1,", "= np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 = [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### #####", "= np.matmul(R_init.T, R_via2) if np.allclose(R_to_2, np.eye(3)): c_to_2 = [np.array([0, 0, 1]), 0.] else:", "# input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign) if rot_index is", "1 ang_floor = 0. H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 =", "for some axis alignment init_orient = np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient", "between the two vectors. Hence the axis-angle convention will be used to construct", "Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) second_goal = np.matmul(Rot1[:-1, :-1], current_orient) #", "# Trig functions (only need to do this maths once!). ca = np.cos(angle)", "= 0.0 index = 0 for i in range(3): z = H[2, i]", "R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate from one vector", "series of rotation matrices possible. 
Due to axially symmetry, the rotation axis can", "R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor =", "this second calculation applies the relative transformation which is desired based on the", "= -np.pi / 2 if add_one(rot_over) != index_H0: angle = -angle else: angle", "another. For the rotation of one vector to another, there are an infinit", "* x * y R[1, 1] = 1.0 + (1.0 - ca) *", "= R_2vect(current_orient[:, index], des_vec) first_goal = np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]])", "R[2, 1] = x * sa + (1.0 - ca) * y *", "== index_H0: if sign == sign_H0: return None, None else: angle = np.pi", "r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 =", "= [c01, c12,c2g] return command_seq, [c1g], [c_to_2, c_to_g] def find_index_z(self, H): big_Z =", "r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From Viapoint 1 to Viapoint", "ang_via = R_2vect(current_orient[:, index], des_vec) second_goal = np.matmul(Rot1[:-1, :-1], current_orient) # now apply", "1]), 0.] else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g,", "[rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)] command_seq = [c01, c12,c2g] return command_seq, [c1g],", "return R, axis, angle def calculate_mutltiple_goals(init_information, obs): goal_orients = [] # This first", "matrix to update. @type R: 3x3 numpy array @param vector_orig: The unrotated vector", "= self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g,", "* y R[0, 2] = y * sa + (1.0 - ca) *", "the dot product of the two unit vectors. 
Given a unit vector parallel", "index], des_vec) H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2, -1] = 0.", "R: 3x3 numpy array @param vector_orig: The unrotated vector defined in the reference", "index_H0: angle = -angle return rot_over, angle def closest_axis_2_normal(self, H): # print (H)", "### R_g = self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g)", "index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw,", "vector of the vector. \"\"\" return vector / np.linalg.norm(vector) def add_one(index): if index+1", "1] vec_2 = H_via2[:-1, 1] else: vec_1 = H_via1[:-1, 0] vec_2 = H_via2[:-1,", "r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) second_goal = np.matmul(Rot1[:-1, :-1], current_orient) # now", "min_angle, index, sign def R_2vect(vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate", "sign def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate from", "as p def todegree(w): return w*180/np.pi def torad(w): return w*np.pi/180 def angle(v1, v2):", "alignment init_orient = np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient / np.linalg.norm(init_orient) current_orient", "np.eye(3)): c_to_2 = [np.array([0, 0, 1]), 0.] 
else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec() c_to_2 =", "np from scipy.spatial.transform import Rotation import numpy as np import pybullet as p", "-ang_via, r_vec_via2gw] ######################################################################### ############# From Floor to Viapoint 1 #################### index_H0, sign_H0 =", "= init_orient / np.linalg.norm(init_orient) current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta, index, sign = closest_axis_2_userdefined(to_H(current_orient),", "= to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g =", "sign_H0: return None, None else: angle = np.pi if index == 0: rot_over", "np.linalg.norm(axis) if axis_len != 0.0: axis = axis / axis_len # Alias the", "x==0 and y==0 and z==0: z=1 # The rotation angle. angle = np.arccos(np.clip(np.dot(vector_orig,", "##### Compute rotation from start to Via-2 ## R_via2 = H_via2[:-1,:-1] R_init =", "np.linalg.norm(vector) def add_one(index): if index+1 == 3: index_out = 0 else: index_out =", "* sa + (1.0 - ca) * x * z R[1, 0] =", "theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw,", "0.0 index = 0 for i in range(3): z = H[2, i] #", "if np.allclose(R_to_g, np.eye(3)): c_to_g = [np.array([0, 0, 1]), 0.] 
else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec()", "| z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1)", "import numpy as np import pybullet as p def todegree(w): return w*180/np.pi def", "np.array([0, 0, 1]) index = 0 sign = 0 reverse = False for", "index_H0: if sign == sign_H0: return None, None else: angle = np.pi if", "(1.0 - ca) * y * z R[2, 0] = -y * sa", "@param vector_orig: The unrotated vector defined in the reference frame. @type vector_orig: numpy", "= -y * sa + (1.0 - ca) * x * z R[2,", "closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:,", "* z R[2, 2] = 1.0 + (1.0 - ca) * (z **", "rotation matrices possible. Due to axially symmetry, the rotation axis can be any", "################ if index == 0: vec_1 = H_via1[:-1, 1] vec_2 = H_via2[:-1, 1]", "angle = -angle else: angle = np.pi / 2 if add_one(rot_over) != index_H0:", "= np.asarray(init_information[:2]) init_orient = init_orient / np.linalg.norm(init_orient) current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta, index,", "self.H0 = H0 def get_control_seq(self, ax=None): ## Control Sequence will provide rotation vector", "r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1] =", "vec_2) r_vec_via12w = np.zeros(3) r_vec_via12w[2] = np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 =", "from one vector to another. For the rotation of one vector to another,", "== 3: index_out = 0 else: index_out = index+1 return index_out def to_H(R,", "(rot_over == index or rot_over == index_H0): rot_over += 1 if sign ==", "1] = -z * sa + (1.0 - ca) * x * y", "needed since otherwise problems code,... 
# this second calculation applies the relative transformation", "Control Sequence will provide rotation vector and desired rotation to achieve target ##", "from scipy.spatial.transform import Rotation import numpy as np import pybullet as p def", "todegree(angle(x, x_des)) #print (theta) if theta > 90: theta = theta - 180", "H def closest_axis_2_userdefined(H, vec): #print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des =", "rotation of one vector to another, there are an infinit series of rotation", "index_H0): rot_over += 1 if sign == sign_H0: angle = -np.pi / 2", "1.0 + (1.0 - ca) * (x ** 2 - 1.0) R[0, 1]", "else: sign = np.sign(theta) return min_angle, index, sign def R_2vect(vector_orig, vector_fin): \"\"\"Calculate the", "R_via2 = H_via2[:-1,:-1] R_init = self.H0[:-1,:-1] R_to_2 = np.matmul(R_init.T, R_via2) if np.allclose(R_to_2, np.eye(3)):", "calculate_mutltiple_goals(init_information, obs): goal_orients = [] # This first calcuation step allows to calculate", "None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1", "sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index,", "x * y R[0, 2] = y * sa + (1.0 - ca)", "rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)] command_seq =", "= axis[2] if x==0 and y==0 and z==0: z=1 # The rotation angle.", "= np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor = np.zeros(3) r_vec_floor[index] = 1", "start to Goal ### R_g = self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1] R_to_g", "vec_1 = H_via1[:-1, 1] vec_2 = H_via2[:-1, 1] else: vec_1 = H_via1[:-1, 0]", "sign) # input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign) if rot_index", "w = [x, y, z] and the rotation angle a, the rotation matrix", "(1.0 - ca) * x * y R[0, 
2] = y * sa", "y * z R[2, 0] = -y * sa + (1.0 - ca)", "class RotationPrimitives(): def __init__(self, H0, Hg): self.H0 = H0 self.Hg = Hg def", "rot_over = 0 return rot_over, angle else: rot_over = 0 while (rot_over ==", "-z * sa + (1.0 - ca) * x * y R[0, 2]", "ord=2)] command_seq = [c01, c12,c2g] return command_seq, [c1g], [c_to_2, c_to_g] def find_index_z(self, H):", "y R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2", "R_init = self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g =", "RotationPrimitives(): def __init__(self, H0, Hg): self.H0 = H0 self.Hg = Hg def set_goal(self,", "goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since otherwise problems code,... # this second calculation", "np.pi if index == 0: rot_over = 1 else: rot_over = 0 return", "to unit vectors. vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin / np.linalg.norm(vector_fin)", "def unit_vector(vector): \"\"\" Returns the unit vector of the vector. \"\"\" return vector", "= 1.0 + (1.0 - ca) * (z ** 2 - 1.0) return", "r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw] ######################################################################### ############# From Floor", "0, 1]), 0.] 
else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2),", "vector parallel to the rotation axis, w = [x, y, z] and the", "= vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin / np.linalg.norm(vector_fin) # The rotation axis", "rot_over += 1 if sign == sign_H0: angle = -np.pi / 2 if", "r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 = [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT:", "(1.0 - ca) * x * z R[1, 0] = z * sa", "x * z R[1, 0] = z * sa + (1.0 - ca)", "vec_1 = H_via1[:-1, 0] vec_2 = H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1,", "| @param R: The 3x3 rotation matrix to update. @type R: 3x3 numpy", "** 2 - 1.0) R[1, 2] = -x * sa + (1.0 -", "##### Compute rotation from start to Goal ### R_g = self.Hg[:-1, :-1] R_init", "c_to_g] def find_index_z(self, H): big_Z = 0.0 index = 0 for i in", "Viapoint 1 #################### index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) #", "Goal ### R_g = self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T,", "set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self, ax=None): ## Control Sequence will provide rotation", "w*180/np.pi def torad(w): return w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1) v2_u =", "vectors. 
Hence the axis-angle convention will be used to construct the matrix with", "scipy.spatial.transform import Rotation import numpy as np import pybullet as p def todegree(w):", "= np.array([0, 0, 1]) index = 0 sign = 0 reverse = False", "sign == sign_H0: return None, None else: angle = np.pi if index ==", "i in range(3): x = H[:-1, i] theta = todegree(angle(x, x_des)) # print", "closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index],", "= np.matmul(Rot1[:-1, :-1], current_orient) # now apply rotation: second_goal = np.matmul(des_rotation, second_goal) #", "sign = 0 reverse = False for i in range(3): x = H[:-1,", "* sa + (1.0 - ca) * y * z R[2, 2] =", "1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1)", "= 0. r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw] ######################################################################### #############", "From Floor to Viapoint 1 #################### index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0, sign_H0", "rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from", "def add_one(index): if index+1 == 3: index_out = 0 else: index_out = index+1", "np.matmul(Rot1[:-1, :-1], current_orient) # now apply rotation: second_goal = np.matmul(des_rotation, second_goal) # now", "np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 = [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT: ######## rot_12", "1.0) R[1, 2] = -x * sa + (1.0 - ca) * y", "problems code,... 
# this second calculation applies the relative transformation which is desired", "if index+1 == 3: index_out = 0 else: index_out = index+1 return index_out", "index = i return index, sign def find_rot_z(self, index_H0, sign_H0, index, sign): if", "vector / np.linalg.norm(vector) def add_one(index): if index+1 == 3: index_out = 0 else:", "= np.matmul(des_rotation, second_goal) # now rotate back to orientation that we are now", "Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1)", "angle = -angle return rot_over, angle def closest_axis_2_normal(self, H): # print (H) #", "vectors to unit vectors. vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin /", "= self.Hg[:-1,-1] H_via2[2, -1] = 0. r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g,", "the rotation angle a, the rotation matrix R is:: | 1 + (1-cos(a))*(x*x-1)", "The rotated vector defined in the reference frame. @type vector_fin: numpy array, len", "at: second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal) second_goal = Rotation.from_matrix(second_goal) goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]]) #addition of noise", "self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2, -1] =", "to do this maths once!). 
ca = np.cos(angle) sa = np.sin(angle) R =", "np.matmul(des_rotation, second_goal) # now rotate back to orientation that we are now at:", "axis[1] z = axis[2] if x==0 and y==0 and z==0: z=1 # The", "the rotation matrix R is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R", "input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign) if rot_index is not", "1 to Viapoint 2 ################ if index == 0: vec_1 = H_via1[:-1, 1]", "R = np.eye(4) # Calculate the rotation matrix elements. R[0, 0] = 1.0", "the two unit vectors. Given a unit vector parallel to the rotation axis,", "0.: if reverse: sign = -1 else: sign = 1 else: sign =", "self.H0 = H0 self.Hg = Hg def set_goal(self, Hg): self.Hg = Hg def", "rot_over = 0 while (rot_over == index or rot_over == index_H0): rot_over +=", "to update. @type R: 3x3 numpy array @param vector_orig: The unrotated vector defined", "= np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since", "i] theta = todegree(angle(x, x_des)) #print (theta) if theta > 90: theta =", "= [x, y, z] and the rotation angle a, the rotation matrix R", "- 1.0) return R, axis, angle class RotationPrimitives(): def __init__(self, H0, Hg): self.H0", "Alias the axis coordinates. x = axis[0] y = axis[1] z = axis[2]", "do this maths once!). ca = np.cos(angle) sa = np.sin(angle) R = np.eye(4)", "# The rotation angle. angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1)) # Trig functions (only need", "p def todegree(w): return w*180/np.pi def torad(w): return w*np.pi/180 def angle(v1, v2): v1_u", "a viapoint! I.e. it yields a goal orientation for some axis alignment init_orient", "desired based on the current observation!!! 
# now take into account the desired", "a goal orientation for some axis alignment init_orient = np.zeros(3) init_orient[:2] = np.asarray(init_information[:2])", "H_via2[:-1,:-1] R_init = self.H0[:-1,:-1] R_to_2 = np.matmul(R_init.T, R_via2) if np.allclose(R_to_2, np.eye(3)): c_to_2 =", "to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index],", "the arccosine of the dot product of the two unit vectors. Given a", "* np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) second_goal = np.matmul(Rot1[:-1, :-1],", "[r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g =", "R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 -", "= theta - 180 if theta ==0: reverse = True if min_angle >", "#print (H) #print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array(vec) index = 0", "= self.H0[:-1,:-1] R_to_2 = np.matmul(R_init.T, R_via2) if np.allclose(R_to_2, np.eye(3)): c_to_2 = [np.array([0, 0,", "dot product of the two unit vectors. Given a unit vector parallel to", "(only need to do this maths once!). ca = np.cos(angle) sa = np.sin(angle)", "parallel to the rotation axis, w = [x, y, z] and the rotation", "= 0 reverse = False for i in range(3): x = H[:-1, i]", "= axis[1] z = axis[2] if x==0 and y==0 and z==0: z=1 #", "self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2, -1] = 0. 
r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g", "rotation vector and desired rotation to achieve target ## ################## Goal to Viapoint", "else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)] command_seq", "= np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw] ######################################################################### ############# From Floor to", "= Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0,", "= np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign *", "unit vectors. vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin / np.linalg.norm(vector_fin) #", "#print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array(vec) index = 0 sign =", "index+1 == 3: index_out = 0 else: index_out = index+1 return index_out def", "(1.0 - ca) * (y ** 2 - 1.0) R[1, 2] = -x", "R_init = self.H0[:-1,:-1] R_to_2 = np.matmul(R_init.T, R_via2) if np.allclose(R_to_2, np.eye(3)): c_to_2 = [np.array([0,", "= self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############", "in the reference frame. @type vector_fin: numpy array, len 3 \"\"\" # Convert", "rotation axis, w = [x, y, z] and the rotation angle a, the", "== sign_H0: return None, None else: angle = np.pi if index == 0:", "return min_angle, index, sign def R_2vect(vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to", "############# From Floor to Viapoint 1 #################### index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0,", "defined in the reference frame. 
@type vector_orig: numpy array, len 3 @param vector_fin:", "Viapoint 2 ################ if index == 0: vec_1 = H_via1[:-1, 1] vec_2 =", "= 0. H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor,", "min_angle = 190 x_des = np.array([0, 0, 1]) index = 0 sign =", "- ca) * x * z R[2, 1] = x * sa +", "sign = -1 else: sign = 1 else: sign = np.sign(theta) return min_angle,", "vector to another, there are an infinit series of rotation matrices possible. Due", "current_orient) # now apply rotation: second_goal = np.matmul(des_rotation, second_goal) # now rotate back", "= self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0, sign * 1]) R, r_vec_via2gw, ang_via =", "0, 1]), 0.] else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec() c_to_2 = [rot_to_2 / np.linalg.norm(rot_to_2, ord=2),", "find_rot_z(self, index_H0, sign_H0, index, sign): if index == index_H0: if sign == sign_H0:", "sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via =", "Goal to Viapoint 2 ################################### theta, index, sign = self.closest_axis_2_normal(self.Hg) des_vec = np.array([0,", "will provide rotation vector and desired rotation to achieve target ## ################## Goal", "# now apply rotation: second_goal = np.matmul(des_rotation, second_goal) # now rotate back to", "there are an infinit series of rotation matrices possible. Due to axially symmetry,", "H): # print (H) # print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array([0,", "180 if theta ==0: reverse = True if min_angle > np.abs(theta): min_angle =", "= [] # This first calcuation step allows to calculate a viapoint! I.e.", "else: sign = 1 else: sign = np.sign(theta) return min_angle, index, sign def", "elements. 
R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2", "T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] = R H[:-1,-1] = T return H def", "y = axis[1] z = axis[2] if x==0 and y==0 and z==0: z=1", "axis (normalised). axis = np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis) if axis_len != 0.0:", "z R[2, 1] = x * sa + (1.0 - ca) * y", "(np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array([0, 0, 1]) index = 0 sign", ":-1] R_init = self.H0[:-1, :-1] R_to_g = np.matmul(R_init.T, R_g) if np.allclose(R_to_g, np.eye(3)): c_to_g", "two vectors. The rotation angle is the arccosine of the dot product of", "min_angle, index, sign def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to", "and z==0: z=1 # The rotation angle. angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1)) # Trig", "range(3): z = H[2, i] # print(z) if np.abs(z) > big_Z: big_Z =", "print (H) # print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des = np.array([0, 0, 1])", "angle. angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1)) # Trig functions (only need to do this", "axis_len != 0.0: axis = axis / axis_len # Alias the axis coordinates.", "3x3 rotation matrix to update. @type R: 3x3 numpy array @param vector_orig: The", "H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2, -1] = 0. r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g =", "0 for i in range(3): z = H[2, i] # print(z) if np.abs(z)", "c12 = [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix())", "0, sign * 1]) R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 =", "= H[2, i] # print(z) if np.abs(z) > big_Z: big_Z = np.abs(z) sign", "# Alias the axis coordinates. 
x = axis[0] y = axis[1] z =", "* y R[1, 1] = 1.0 + (1.0 - ca) * (y **", "take into account the desired rotation from the target information: des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3,", "des_vec = np.array([0, 0, sign * 1]) R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index],", "y * sa + (1.0 - ca) * x * z R[1, 0]", "- 1.0) return R, axis, angle def calculate_mutltiple_goals(init_information, obs): goal_orients = [] #", "= i return index, sign def find_rot_z(self, index_H0, sign_H0, index, sign): if index", ",index_H0, sign_H0 = self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0, index, sign) # input (\"WAIT\")", "vectors. Given a unit vector parallel to the rotation axis, w = [x,", "ang_floor = 0. H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor,", "init_orient = np.zeros(3) init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient / np.linalg.norm(init_orient) current_orient =", "rotation matrix elements. R[0, 0] = 1.0 + (1.0 - ca) * (x", "0: vec_1 = H_via1[:-1, 1] vec_2 = H_via2[:-1, 1] else: vec_1 = H_via1[:-1,", "0. H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w]", "I.e. it yields a goal orientation for some axis alignment init_orient = np.zeros(3)", "to_H(R, T=np.zeros(3)): H = np.eye(4) H[:-1,:-1] = R H[:-1,-1] = T return H", "another, there are an infinit series of rotation matrices possible. 
Due to axially", "= closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:,", "index or rot_over == index_H0): rot_over += 1 if sign == sign_H0: angle", "convention will be used to construct the matrix with the rotation axis defined", "def find_rot_z(self, index_H0, sign_H0, index, sign): if index == index_H0: if sign ==", "== 0: vec_1 = H_via1[:-1, 1] vec_2 = H_via2[:-1, 1] else: vec_1 =", "symmetry plane between the two vectors. Hence the axis-angle convention will be used", "axis = np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis) if axis_len != 0.0: axis =", "def get_control_seq(self, ax=None): ## Control Sequence will provide rotation vector and desired rotation", "index, sign def R_2vect(self, vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate", "sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) first_goal = np.matmul(Rot1[:-1,", "import pybullet as p def todegree(w): return w*180/np.pi def torad(w): return w*np.pi/180 def", "np.eye(3)): c_to_g = [np.array([0, 0, 1]), 0.] else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g =", "rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else: r_vec_floor", "axis, angle def calculate_mutltiple_goals(init_information, obs): goal_orients = [] # This first calcuation step", "| R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z", "= [np.array([0, 0, 1]), 0.] else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec() c_to_2 = [rot_to_2 /", "calculate a viapoint! I.e. 
it yields a goal orientation for some axis alignment", "second_goal) second_goal = Rotation.from_matrix(second_goal) goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]]) #addition of noise needed since otherwise problems code,...", "@type vector_fin: numpy array, len 3 \"\"\" # Convert the vectors to unit", "z * sa + (1.0 - ca) * x * y R[1, 1]", "= np.zeros(3) r_vec_via12w[2] = np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 = [r_vec_via12, ang_via12,", "np.matmul(Rot1[:-1, :-1], current_orient) first_goal = Rotation.from_matrix(first_goal) goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]]) #addition of noise needed since otherwise", "Via-2 ## R_via2 = H_via2[:-1,:-1] R_init = self.H0[:-1,:-1] R_to_2 = np.matmul(R_init.T, R_via2) if", "angle def closest_axis_2_normal(self, H): # print (H) # print (np.linalg.inv(H[:-1,:-1])) min_angle = 190", "a, the rotation matrix R is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |", "- ca) * (z ** 2 - 1.0) return R, axis, angle def", "frame. @type vector_orig: numpy array, len 3 @param vector_fin: The rotated vector defined", "1.0 + (1.0 - ca) * (z ** 2 - 1.0) return R,", "for i in range(3): x = H[:-1, i] theta = todegree(angle(x, x_des)) #", "calculation applies the relative transformation which is desired based on the current observation!!!", "0] = -y * sa + (1.0 - ca) * x * z", "in the symmetry plane between the two vectors. Hence the axis-angle convention will", "np.zeros(3) r_vec_floor[index] = 1 ang_floor = 0. 
H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1],", "= 0 sign = 0 reverse = False for i in range(3): x", "set_goal(self, Hg): self.Hg = Hg def set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self, ax=None):", "angle else: rot_over = 0 while (rot_over == index or rot_over == index_H0):", "closest_axis_2_normal(self, H): # print (H) # print (np.linalg.inv(H[:-1,:-1])) min_angle = 190 x_des =", "-x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R: The 3x3", "pybullet as p def todegree(w): return w*180/np.pi def torad(w): return w*np.pi/180 def angle(v1,", "index, sign): if index == index_H0: if sign == sign_H0: return None, None", "return vector / np.linalg.norm(vector) def add_one(index): if index+1 == 3: index_out = 0", "sign_H0, index, sign) # input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign)", "if np.allclose(R_to_2, np.eye(3)): c_to_2 = [np.array([0, 0, 1]), 0.] else: rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec()", "y, z] and the rotation angle a, the rotation matrix R is:: |", "############ From Viapoint 1 to Viapoint 2 ################ if index == 0: vec_1", "0.] else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)]", "vectors. The rotation angle is the arccosine of the dot product of the", "from start to Goal ### R_g = self.Hg[:-1, :-1] R_init = self.H0[:-1, :-1]", "rot_index is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor", "matrix required to rotate from one vector to another. For the rotation of", "defined in the reference frame. 
@type vector_fin: numpy array, len 3 \"\"\" #", "# print (theta) if theta > 90: theta = theta - 180 if", "np.eye(4) H[:-1,:-1] = R H[:-1,-1] = T return H def closest_axis_2_userdefined(H, vec): #print", "np.eye(4) # Calculate the rotation matrix elements. R[0, 0] = 1.0 + (1.0", "index_out = 0 else: index_out = index+1 return index_out def to_H(R, T=np.zeros(3)): H", "sign_H0: angle = -np.pi / 2 if add_one(rot_over) != index_H0: angle = -angle", "axis coordinates. x = axis[0] y = axis[1] z = axis[2] if x==0", "init_orient = np.asarray([1,0,0]) theta, index, sign = closest_axis_2_userdefined( to_H(current_orient), init_orient) des_vec = sign", "unrotated vector defined in the reference frame. @type vector_orig: numpy array, len 3", "H[:-1,:-1] = R H[:-1,-1] = T return H def closest_axis_2_userdefined(H, vec): #print (H)", "= axis / axis_len # Alias the axis coordinates. x = axis[0] y", "sign * 1]) R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 = np.matmul(R,", "if rot_index is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor =", "vec_2 = H_via2[:-1, 1] else: vec_1 = H_via1[:-1, 0] vec_2 = H_via2[:-1, 0]", "= np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]), 0.] else: rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec()", "From Viapoint 1 to Viapoint 2 ################ if index == 0: vec_1 =", "# The rotation axis (normalised). axis = np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis) if", "init_orient[:2] = np.asarray(init_information[:2]) init_orient = init_orient / np.linalg.norm(init_orient) current_orient = np.asarray(p.getMatrixFromQuaternion(obs[\"object_orientation\"])).reshape(3, 3) theta,", "np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def unit_vector(vector): \"\"\" Returns the unit vector of the", "- 1.0) R[0, 1] = -z * sa + (1.0 - ca) *", "unit_vector(vector): \"\"\" Returns the unit vector of the vector. 
\"\"\" return vector /", "index, sign = self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0, sign * 1]) R, r_vec_via2gw,", "# this second calculation applies the relative transformation which is desired based on", "r_vec_floor = np.zeros(3) r_vec_floor[index] = 1 ang_floor = 0. H_via1 = self.H0 r_vec_floor_w", "x = H[:-1, i] theta = todegree(angle(x, x_des)) #print (theta) if theta >", "index], des_vec) second_goal = np.matmul(Rot1[:-1, :-1], current_orient) # now apply rotation: second_goal =", "to the rotation axis, w = [x, y, z] and the rotation angle", "[np.array([0, 0, 1]), 0.] else: rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g,", "theta = todegree(angle(x, x_des)) #print (theta) if theta > 90: theta = theta", "on the current observation!!! # now take into account the desired rotation from", "3 @param vector_fin: The rotated vector defined in the reference frame. @type vector_fin:", "sign) if rot_index is not None: r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor", "unit_vector(v1) v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def unit_vector(vector): \"\"\" Returns", "desired rotation from the target information: des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3) init_orient = np.asarray([1,0,0])", "the unit vector of the vector. 
\"\"\" return vector / np.linalg.norm(vector) def add_one(index):", "np import pybullet as p def todegree(w): return w*180/np.pi def torad(w): return w*np.pi/180", "ca) * y * z R[2, 2] = 1.0 + (1.0 - ca)", "* x * z R[1, 0] = z * sa + (1.0 -", "-angle return rot_over, angle def closest_axis_2_normal(self, H): # print (H) # print (np.linalg.inv(H[:-1,:-1]))", "return rot_over, angle def closest_axis_2_normal(self, H): # print (H) # print (np.linalg.inv(H[:-1,:-1])) min_angle", "todegree(w): return w*180/np.pi def torad(w): return w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1)", "= R H[:-1,-1] = T return H def closest_axis_2_userdefined(H, vec): #print (H) #print", "1.0)) def unit_vector(vector): \"\"\" Returns the unit vector of the vector. \"\"\" return", "# This first calcuation step allows to calculate a viapoint! I.e. it yields", "will be used to construct the matrix with the rotation axis defined as", "vec_2 = H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2) r_vec_via12w = np.zeros(3)", "2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)", "if np.abs(z) > big_Z: big_Z = np.abs(z) sign = np.sign(z) index = i", "-y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R: The 3x3 rotation matrix to", "rotated vector defined in the reference frame. @type vector_fin: numpy array, len 3", "ca) * x * z R[1, 0] = z * sa + (1.0", "Viapoint 2 ################################### theta, index, sign = self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0, sign", "vectors. vector_orig = vector_orig / np.linalg.norm(vector_orig) vector_fin = vector_fin / np.linalg.norm(vector_fin) # The", "vector_fin / np.linalg.norm(vector_fin) # The rotation axis (normalised). 
axis = np.cross(vector_orig, vector_fin) axis_len", "i in range(3): x = H[:-1, i] theta = todegree(angle(x, x_des)) #print (theta)", "sa + (1.0 - ca) * x * y R[1, 1] = 1.0", "unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def unit_vector(vector): \"\"\" Returns the unit vector", "* np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) first_goal = np.matmul(Rot1[:-1, :-1],", "desired rotation to achieve target ## ################## Goal to Viapoint 2 ################################### theta,", "vector_orig: numpy array, len 3 @param vector_fin: The rotated vector defined in the", "** 2 - 1.0) R[0, 1] = -z * sa + (1.0 -", "i] # print(z) if np.abs(z) > big_Z: big_Z = np.abs(z) sign = np.sign(z)", "return w*180/np.pi def torad(w): return w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1) v2_u", "to axially symmetry, the rotation axis can be any vector lying in the", "rotation axis can be any vector lying in the symmetry plane between the", "rotation matrix required to rotate from one vector to another. For the rotation", "self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From", "np.linalg.norm(vector_fin) # The rotation axis (normalised). axis = np.cross(vector_orig, vector_fin) axis_len = np.linalg.norm(axis)", "min_angle = np.abs(theta) index = i if theta == 0.: if reverse: sign", "noise needed since otherwise problems code,... # this second calculation applies the relative", "r_vec_floor[index] = 1 ang_floor = 0. 
H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor)", "* z R[2, 1] = x * sa + (1.0 - ca) *", "+ (1.0 - ca) * x * y R[0, 2] = y *", "######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)):", "ca) * (z ** 2 - 1.0) return R, axis, angle def calculate_mutltiple_goals(init_information,", "* r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1]", "r_vec_floor = np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 =", "numpy as np from scipy.spatial.transform import Rotation import numpy as np import pybullet", "Given a unit vector parallel to the rotation axis, w = [x, y,", "H[2, i] # print(z) if np.abs(z) > big_Z: big_Z = np.abs(z) sign =", "The rotation angle is the arccosine of the dot product of the two", "3 \"\"\" # Convert the vectors to unit vectors. vector_orig = vector_orig /", "command_seq, [c1g], [c_to_2, c_to_g] def find_index_z(self, H): big_Z = 0.0 index = 0", "r_vec_via12w) c12 = [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT: ######## rot_12 =", "R is:: | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z | R = | z*sin(a)+(1-cos(a))*x*y", ":-1], current_orient) # now apply rotation: second_goal = np.matmul(des_rotation, second_goal) # now rotate", "can be any vector lying in the symmetry plane between the two vectors.", "+ (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z | | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) | @param R:", "[] # This first calcuation step allows to calculate a viapoint! I.e. 
it", "1] else: vec_1 = H_via1[:-1, 0] vec_2 = H_via2[:-1, 0] R12, r_vec_via12_p, ang_via12", "[c01, c12,c2g] return command_seq, [c1g], [c_to_2, c_to_g] def find_index_z(self, H): big_Z = 0.0", "1 else: sign = np.sign(theta) return min_angle, index, sign def R_2vect(vector_orig, vector_fin): \"\"\"Calculate", "rotation from start to Via-2 ## R_via2 = H_via2[:-1,:-1] R_init = self.H0[:-1,:-1] R_to_2", "sa = np.sin(angle) R = np.eye(4) # Calculate the rotation matrix elements. R[0,", "/ np.linalg.norm(vector_fin) # The rotation axis (normalised). axis = np.cross(vector_orig, vector_fin) axis_len =", "array, len 3 @param vector_fin: The rotated vector defined in the reference frame.", "the two vectors. Hence the axis-angle convention will be used to construct the", "def set_goal(self, Hg): self.Hg = Hg def set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self,", "= x * sa + (1.0 - ca) * y * z R[2,", "self.Hg = Hg def set_current_pose(self,H0): self.H0 = H0 def get_control_seq(self, ax=None): ## Control", "- ca) * (z ** 2 - 1.0) return R, axis, angle class", "-1] = 0. r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw) c2g = [r_vec_via2g, -ang_via, r_vec_via2gw] #########################################################################", "* x * y R[0, 2] = y * sa + (1.0 -", "this maths once!). 
ca = np.cos(angle) sa = np.sin(angle) R = np.eye(4) #", "** 2 - 1.0) return R, axis, angle def calculate_mutltiple_goals(init_information, obs): goal_orients =", "second calculation applies the relative transformation which is desired based on the current", "+ (1.0 - ca) * x * z R[1, 0] = z *", "from the target information: des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3) init_orient = np.asarray([1,0,0]) theta, index,", "theta == 0.: if reverse: sign = -1 else: sign = 1 else:", "+ (1.0 - ca) * (x ** 2 - 1.0) R[0, 1] =", "#################################################### ############ From Viapoint 1 to Viapoint 2 ################ if index == 0:", "/ np.linalg.norm(rot_to_2, ord=2), np.linalg.norm(rot_to_2, ord=2)] ##### Compute rotation from start to Goal ###", "sa + (1.0 - ca) * x * z R[1, 0] = z", "= rotation_floor.as_matrix() R_floor_1 = to_H(R=R_floor_1) H_via1 = np.matmul(self.H0, R_floor_1) #H_via1[1,-1] = 0.3 else:", "print (index_H0, sign_H0, index, sign) # input (\"WAIT\") rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0,", "to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]),", "z R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2", "it yields a goal orientation for some axis alignment init_orient = np.zeros(3) init_orient[:2]", "2 - 1.0) R[0, 1] = -z * sa + (1.0 - ca)", "1 else: sign = np.sign(theta) return min_angle, index, sign def R_2vect(self, vector_orig, vector_fin):", "def R_2vect(vector_orig, vector_fin): \"\"\"Calculate the rotation matrix required to rotate from one vector", "* y * z R[2, 0] = -y * sa + (1.0 -", "axis, w = [x, y, z] and the rotation angle a, the rotation", "return w*np.pi/180 def angle(v1, v2): v1_u = unit_vector(v1) v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u,", "des_vec = sign * np.array(init_orient) Rot1, 
r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) first_goal", "== index or rot_over == index_H0): rot_over += 1 if sign == sign_H0:", "min_angle = 190 x_des = np.array(vec) index = 0 sign = 0 reverse", "ang_via = self.R_2vect(self.Hg[:-1, index], des_vec) H_via2 = np.matmul(R, self.Hg) H_via2[:-1,-1] = self.Hg[:-1,-1] H_via2[2,", "rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix()) rot1g = np.matmul(rot_12,rot2g) if np.allclose(rot1g, np.eye(4)): c1g = [np.array([0,0,1]), 0.]", "np.sign(r_vec_via12_p[2]) r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w) c12 = [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE", "= np.zeros(3) r_vec_floor[index] = 1 ang_floor = 0. H_via1 = self.H0 r_vec_floor_w =", "2 if add_one(rot_over) != index_H0: angle = -angle else: angle = np.pi /", "(1.0 - ca) * y * z R[2, 2] = 1.0 + (1.0", "/ axis_len # Alias the axis coordinates. x = axis[0] y = axis[1]", "def angle(v1, v2): v1_u = unit_vector(v1) v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0,", "= [r_vec_floor, ang_floor, r_vec_floor_w] #################################################### ############ From Viapoint 1 to Viapoint 2 ################", "x = axis[0] y = axis[1] z = axis[2] if x==0 and y==0", "Hg def set_goal(self, Hg): self.Hg = Hg def set_current_pose(self,H0): self.H0 = H0 def", "3: index_out = 0 else: index_out = index+1 return index_out def to_H(R, T=np.zeros(3)):", "3x3 numpy array @param vector_orig: The unrotated vector defined in the reference frame.", "np.zeros(3) r_vec_floor[rot_index] = 1 rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor) R_floor_1 = rotation_floor.as_matrix() R_floor_1", "H_via1 = self.H0 r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor) c01 = [r_vec_floor, ang_floor, r_vec_floor_w] ####################################################", "index_H0, sign_H0 = self.find_index_z(self.H0) #theta_0 ,index_H0, 
sign_H0 = self.closest_axis_2_normal(self.H0) # print (index_H0, sign_H0,", "angle def calculate_mutltiple_goals(init_information, obs): goal_orients = [] # This first calcuation step allows", "des_vec = sign * np.array(init_orient) Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec) second_goal", "Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec() c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)] ##### Compute rotation from start to", "np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)] command_seq = [c01, c12,c2g] return command_seq, [c1g], [c_to_2, c_to_g]", "with the rotation axis defined as the cross product of the two vectors.", "rot_over, angle else: rot_over = 0 while (rot_over == index or rot_over ==", "to another, there are an infinit series of rotation matrices possible. Due to", "coordinates. x = axis[0] y = axis[1] z = axis[2] if x==0 and", "functions (only need to do this maths once!). ca = np.cos(angle) sa =", "= [r_vec_via12, ang_via12, r_vec_via12w] ########################################################### ##### COMPUTE SHORTCUT: ######## rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix()) rot2g", "if theta == 0.: if reverse: sign = -1 else: sign = 1", "is the arccosine of the dot product of the two unit vectors. Given", "lying in the symmetry plane between the two vectors. Hence the axis-angle convention", "Rotation.from_matrix(R_to_g).as_rotvec() c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)] command_seq = [c01, c12,c2g]", "theta, index, sign = self.closest_axis_2_normal(self.Hg) des_vec = np.array([0, 0, sign * 1]) R,", "the reference frame. @type vector_orig: numpy array, len 3 @param vector_fin: The rotated", "3) theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient) des_vec = sign * np.array(init_orient) Rot1,", "code,... 
# this second calculation applies the relative transformation which is desired based", "(1.0 - ca) * (z ** 2 - 1.0) return R, axis, angle", "big_Z = 0.0 index = 0 for i in range(3): z = H[2,", "x_des)) # print (theta) if theta > 90: theta = theta - 180" ]
[ "pymongo import MongoClient import os def get_client(): client = MongoClient('searchtube_mongo', 27017, username=os.environ['DB_USERNAME'], password=os.environ['DB_PASSWORD'])", "<reponame>dermasmid/searchtube from pymongo import MongoClient import os def get_client(): client = MongoClient('searchtube_mongo', 27017,", "MongoClient import os def get_client(): client = MongoClient('searchtube_mongo', 27017, username=os.environ['DB_USERNAME'], password=os.environ['DB_PASSWORD']) return client", "from pymongo import MongoClient import os def get_client(): client = MongoClient('searchtube_mongo', 27017, username=os.environ['DB_USERNAME'],", "import MongoClient import os def get_client(): client = MongoClient('searchtube_mongo', 27017, username=os.environ['DB_USERNAME'], password=os.environ['DB_PASSWORD']) return" ]
[ "\"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb') as fhandle: for chunk in iter(lambda:", "__name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path", "'../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of a file. Parameters ---------- file_path: str", "hash of a file. Parameters ---------- file_path: str File path. Returns ------- md5_hash:", "hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {}", "MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path to MAESTRO data folder.' ) main(PARSER.parse_args())", "fhandle: for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path", "md5_hash: str md5 hash of data in file_path \"\"\" hash_md5 = hashlib.md5() with", "import argparse import hashlib import json import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json'", "\"\"\"Get md5 hash of a file. Parameters ---------- file_path: str File path. Returns", "{} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path", "json.load(fhandle) for i, row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {}", "file. Parameters ---------- file_path: str File path. Returns ------- md5_hash: str md5 hash", "fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index", "a file. Parameters ---------- file_path: str File path. 
Returns ------- md5_hash: str md5", "md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2)", "file_path: str File path. Returns ------- md5_hash: str md5 hash of data in", "import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of a file.", "hashlib import json import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get", "= os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path,", "midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path =", "import hashlib import json import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path):", "for i, row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path", "File path. 
Returns ------- md5_hash: str md5 hash of data in file_path \"\"\"", "main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO", "[row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum]", "as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__", "str md5 hash of data in file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path,", "print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum =", "trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path)", "fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER", "Parameters ---------- file_path: str File path. Returns ------- md5_hash: str md5 hash of", "chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path,", "hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path,", "{} with open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle) for i, row in", "of a file. Parameters ---------- file_path: str File path. 
Returns ------- md5_hash: str", "import json import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5", "make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path, 'r') as", "---------- file_path: str File path. Returns ------- md5_hash: str md5 hash of data", "hash of data in file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb') as", "= {} with open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle) for i, row", "enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum", "maestro_index = {} with open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle) for i,", "midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum =", "in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename'])", "as fhandle: for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path):", "= {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum]", "fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ ==", "os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename'])", "= md5(audio_path) maestro_index[trackid]['audio'] = 
[row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle,", "i, row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path =", "[row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating", "row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path,", "path. Returns ------- md5_hash: str md5 hash of data in file_path \"\"\" hash_md5", "'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if", "audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\")", "with open(file_path, 'rb') as fhandle: for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return", "indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER =", "with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path)", "make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument(", "------- md5_hash: str md5 hash of data in file_path \"\"\" hash_md5 = hashlib.md5()", "MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of a file. 
Parameters ----------", "'rb') as fhandle: for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def", "import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of", "with open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle) for i, row in enumerate(metadata):", "maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'],", "midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with", "if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str,", "argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path to MAESTRO data folder.' )", "print(\"done!\") if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path',", "row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle:", "= '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of a file. 
Parameters ---------- file_path:", "= json.load(fhandle) for i, row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid] =", "open(file_path, 'rb') as fhandle: for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest()", "= hashlib.md5() with open(file_path, 'rb') as fhandle: for chunk in iter(lambda: fhandle.read(4096), b''):", "print(metadata_path) maestro_index = {} with open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle) for", "def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path, 'r')", "hash_md5 = hashlib.md5() with open(file_path, 'rb') as fhandle: for chunk in iter(lambda: fhandle.read(4096),", "metadata = json.load(fhandle) for i, row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0] maestro_index[trackid]", "json import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash", "fhandle: metadata = json.load(fhandle) for i, row in enumerate(metadata): print(i) trackid = row['midi_filename'].split('.')[0]", "= md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path)", "for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path =", "def md5(file_path): \"\"\"Get md5 hash of a file. 
Parameters ---------- file_path: str File", "md5 hash of data in file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb')", "'r') as fhandle: metadata = json.load(fhandle) for i, row in enumerate(metadata): print(i) trackid", "= [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args):", "md5(file_path): \"\"\"Get md5 hash of a file. Parameters ---------- file_path: str File path.", "argparse import hashlib import json import csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def", "maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def", "maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] =", "os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as", "audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index,", "md5 hash of a file. Parameters ---------- file_path: str File path. 
Returns -------", "in file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb') as fhandle: for chunk", "file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb') as fhandle: for chunk in", "metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path, 'r') as fhandle:", "= row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi']", "= argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path to MAESTRO data folder.'", "= [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'],", "open(MAESTRO_INDEX_PATH, 'w') as fhandle: json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\")", "data in file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb') as fhandle: for", "md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio']", "os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of a file. 
Parameters", "hashlib.md5() with open(file_path, 'rb') as fhandle: for chunk in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk)", "row['midi_filename'].split('.')[0] maestro_index[trackid] = {} midi_path = os.path.join(data_path, row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] =", "row['midi_filename']) midi_checksum = md5(midi_path) maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum] audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum", "os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path, 'r') as fhandle: metadata =", "csv import os MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json' def md5(file_path): \"\"\"Get md5 hash of a", "json.dump(maestro_index, fhandle, indent=2) def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__':", "PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path to MAESTRO data", "b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index =", "str File path. 
Returns ------- md5_hash: str md5 hash of data in file_path", "in iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json')", "Returns ------- md5_hash: str md5 hash of data in file_path \"\"\" hash_md5 =", "= os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path, 'r') as fhandle: metadata", "as fhandle: metadata = json.load(fhandle) for i, row in enumerate(metadata): print(i) trackid =", "print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index", "iter(lambda: fhandle.read(4096), b''): hash_md5.update(chunk) return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path)", "return hash_md5.hexdigest() def make_maestro_index(data_path): metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with", "def main(args): print(\"creating index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make", "'__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path to MAESTRO", "'maestro-v2.0.0.json') print(metadata_path) maestro_index = {} with open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle)", "audio_path = os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with open(MAESTRO_INDEX_PATH,", "= os.path.join(data_path, row['audio_filename']) audio_checksum = md5(audio_path) maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum] with 
open(MAESTRO_INDEX_PATH, 'w')", "index...\") make_maestro_index(args.maestro_data_path) print(\"done!\") if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.')", "open(metadata_path, 'r') as fhandle: metadata = json.load(fhandle) for i, row in enumerate(metadata): print(i)", "== '__main__': PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.') PARSER.add_argument( 'maestro_data_path', type=str, help='Path to", "of data in file_path \"\"\" hash_md5 = hashlib.md5() with open(file_path, 'rb') as fhandle:" ]
[ "def image(self): return self._screenshot @property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0)", "= Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following dimensions: {0}\".format(self._screenshot.size)) if crop: #", "driver.get(url) # get the screenshot and make it into a Pillow Image self._screenshot", "height: driver.set_window_size(width, height) # go and get the content at the url driver.get(url)", "# crop the image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to:", "subprocess import Popen, PIPE from selenium import webdriver from PIL import Image import", "def capture(self, url, width, height, crop=True): print (\"Capturing website screenshot of: \" +", "image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\":", "\" + url) driver = webdriver.PhantomJS() if width and height: driver.set_window_size(width, height) #", "crop the image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to: {0}", "= io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\": import const", "return bytesio.getvalue() if __name__ == \"__main__\": import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\",", "WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\", 500, 100) # need to explicitly crop", "class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def capture(self, url, width, height, crop=True):", "Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following dimensions: 
{0}\".format(self._screenshot.size)) if crop: # crop", "__name__ == \"__main__\": import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\",", "of: \" + url) driver = webdriver.PhantomJS() if width and height: driver.set_window_size(width, height)", "at the url driver.get(url) # get the screenshot and make it into a", "the image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to: {0} {1}\".format(width,", "a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following dimensions:", "height)) return self @property def image(self): return self._screenshot @property def image_bytes(self): bytesio =", "\"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\": import const g = WebsiteScreenshotGenerator()", "PIPE from selenium import webdriver from PIL import Image import io class WebsiteScreenshotGenerator():", "if crop: # crop the image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the", "= WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\", 500, 100) # need to explicitly", "Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following dimensions: {0}\".format(self._screenshot.size)) if", "crop: # crop the image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the image", "with the following dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the image self._screenshot =", "capture(self, url, width, height, crop=True): print (\"Capturing website screenshot of: \" + url)", "driver.set_window_size(width, height) # go and get the content at the url 
driver.get(url) #", "selenium import webdriver from PIL import Image import io class WebsiteScreenshotGenerator(): def __init__(self):", "self._screenshot @property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if", "webdriver.PhantomJS() if width and height: driver.set_window_size(width, height) # go and get the content", "crop=True): print (\"Capturing website screenshot of: \" + url) driver = webdriver.PhantomJS() if", "height, crop=True): print (\"Capturing website screenshot of: \" + url) driver = webdriver.PhantomJS()", "# go and get the content at the url driver.get(url) # get the", "screenshot and make it into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a", "height) # go and get the content at the url driver.get(url) # get", "to: {0} {1}\".format(width, height)) return self @property def image(self): return self._screenshot @property def", "import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\", 500, 100) #", "== \"__main__\": import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\", 500,", "screenshot with the following dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the image self._screenshot", "(\"Capturing website screenshot of: \" + url) driver = webdriver.PhantomJS() if width and", "self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\": import const g =", "import io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def capture(self, url, width,", "bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\": import const g = 
WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(),", "and make it into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot", "image(self): return self._screenshot @property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return", "Popen, PIPE from selenium import webdriver from PIL import Image import io class", "__init__(self): self._screenshot = None def capture(self, url, width, height, crop=True): print (\"Capturing website", "self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to: {0} {1}\".format(width, height)) return self @property", "{1}\".format(width, height)) return self @property def image(self): return self._screenshot @property def image_bytes(self): bytesio", "url) driver = webdriver.PhantomJS() if width and height: driver.set_window_size(width, height) # go and", "the content at the url driver.get(url) # get the screenshot and make it", "None def capture(self, url, width, height, crop=True): print (\"Capturing website screenshot of: \"", "width and height: driver.set_window_size(width, height) # go and get the content at the", "io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def capture(self, url, width, height,", "= None def capture(self, url, width, height, crop=True): print (\"Capturing website screenshot of:", "io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\": import const g", "following dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the image self._screenshot = self._screenshot.crop((0,0, width,", "import webdriver from PIL import Image import io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot", "get the screenshot and make it into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png()))", "screenshot of: 
\" + url) driver = webdriver.PhantomJS() if width and height: driver.set_window_size(width,", "the following dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the image self._screenshot = self._screenshot.crop((0,0,", "from selenium import webdriver from PIL import Image import io class WebsiteScreenshotGenerator(): def", "width, height)) print(\"Cropped the image to: {0} {1}\".format(width, height)) return self @property def", "self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following dimensions: {0}\".format(self._screenshot.size)) if crop:", "if __name__ == \"__main__\": import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100)", "driver = webdriver.PhantomJS() if width and height: driver.set_window_size(width, height) # go and get", "get the content at the url driver.get(url) # get the screenshot and make", "# get the screenshot and make it into a Pillow Image self._screenshot =", "import Popen, PIPE from selenium import webdriver from PIL import Image import io", "and height: driver.set_window_size(width, height) # go and get the content at the url", "print(\"Cropped the image to: {0} {1}\".format(width, height)) return self @property def image(self): return", "{0} {1}\".format(width, height)) return self @property def image(self): return self._screenshot @property def image_bytes(self):", "width, height, crop=True): print (\"Capturing website screenshot of: \" + url) driver =", "image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to: {0} {1}\".format(width, height))", "dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the image self._screenshot = self._screenshot.crop((0,0, width, height))", "const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) 
g.do_screen_capturing(\"http://espn.go.com\", 500, 100) # need", "print(\"Got a screenshot with the following dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the", "a screenshot with the following dimensions: {0}\".format(self._screenshot.size)) if crop: # crop the image", "into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following", "from subprocess import Popen, PIPE from selenium import webdriver from PIL import Image", "bytesio.getvalue() if __name__ == \"__main__\": import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500,", "WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def capture(self, url, width, height, crop=True): print", "return self._screenshot @property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue()", "and get the content at the url driver.get(url) # get the screenshot and", "+ url) driver = webdriver.PhantomJS() if width and height: driver.set_window_size(width, height) # go", "self._screenshot = None def capture(self, url, width, height, crop=True): print (\"Capturing website screenshot", "print (\"Capturing website screenshot of: \" + url) driver = webdriver.PhantomJS() if width", "self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to: {0} {1}\".format(width, height)) return", "return self @property def image(self): return self._screenshot @property def image_bytes(self): bytesio = io.BytesIO()", "image to: {0} {1}\".format(width, height)) return self @property def image(self): return self._screenshot @property", "PIL import Image import io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def", "webdriver from PIL import Image import io class WebsiteScreenshotGenerator(): def __init__(self): 
self._screenshot =", "from PIL import Image import io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None", "= webdriver.PhantomJS() if width and height: driver.set_window_size(width, height) # go and get the", "the screenshot and make it into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got", "@property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__", "url driver.get(url) # get the screenshot and make it into a Pillow Image", "bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ == \"__main__\": import", "import Image import io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def capture(self,", "go and get the content at the url driver.get(url) # get the screenshot", "the image to: {0} {1}\".format(width, height)) return self @property def image(self): return self._screenshot", "if width and height: driver.set_window_size(width, height) # go and get the content at", "g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\", 500, 100) # need to", "self @property def image(self): return self._screenshot @property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio,", "def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\") bytesio.seek(0) return bytesio.getvalue() if __name__ ==", "content at the url driver.get(url) # get the screenshot and make it into", "\"__main__\": import const g = WebsiteScreenshotGenerator() #g.do_screen_capturing(const.ApodEclipsePage(), \"/Users/michaelheydt/thumbnail.png\", 500, 100) g.do_screen_capturing(\"http://espn.go.com\", 500, 100)", "website screenshot of: \" + url) driver = webdriver.PhantomJS() if 
width and height:", "@property def image(self): return self._screenshot @property def image_bytes(self): bytesio = io.BytesIO() self._screenshot.save(bytesio, \"PNG\")", "Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the following dimensions: {0}\".format(self._screenshot.size))", "url, width, height, crop=True): print (\"Capturing website screenshot of: \" + url) driver", "it into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with the", "the url driver.get(url) # get the screenshot and make it into a Pillow", "def __init__(self): self._screenshot = None def capture(self, url, width, height, crop=True): print (\"Capturing", "make it into a Pillow Image self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png())) print(\"Got a screenshot with", "{0}\".format(self._screenshot.size)) if crop: # crop the image self._screenshot = self._screenshot.crop((0,0, width, height)) print(\"Cropped", "= self._screenshot.crop((0,0, width, height)) print(\"Cropped the image to: {0} {1}\".format(width, height)) return self", "Image import io class WebsiteScreenshotGenerator(): def __init__(self): self._screenshot = None def capture(self, url,", "height)) print(\"Cropped the image to: {0} {1}\".format(width, height)) return self @property def image(self):" ]
[ "box, accept it and assert its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered", "text to a prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False) sst.actions.dismiss_alert(text_to_write='Entered text') sst.actions.assert_title('Page", "sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box and assert its text. sst.actions.click_button('show-alert', wait=False)", "dismissed') # Enter text to a prompt box, accept it and assert its", "and assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts')", "accept it and assert its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text')", "Accept a confirm box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm", "confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a prompt box, accept it", "== 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box and", "sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts') # Accept a confirm", "it and assert its text. 
sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered", "wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter text to a", "# Enter text to a prompt box, accept it and assert its text.", "wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a prompt box,", "not do alerts by design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT)", "with JavaScript alerts') # Accept a confirm box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted')", "# Enter text to a prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False) sst.actions.dismiss_alert(text_to_write='Entered", "sst.actions.assert_title('Page with JavaScript alerts') # Accept a confirm box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm", "box and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') #", "an alert box and assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page", "sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box and assert its text. sst.actions.click_button('show-confirm', wait=False)", "import sst import sst.actions from sst import config # PhantomJS can not do", "alerts') # Accept a confirm box. 
sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss", "sst.actions.accept_alert('Entered text') # Enter text to a prompt box and dismiss it. sst.actions.click_button('show-prompt',", "from sst import config # PhantomJS can not do alerts by design if", "to a prompt box, accept it and assert its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript", "sst import config # PhantomJS can not do alerts by design if config.browser_type", "and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter", "box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box and assert", "JavaScript alerts') # Accept a confirm box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') #", "# Dismiss a confirm box and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm", "prompt box, accept it and assert its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text',", "text to a prompt box, accept it and assert its text. sst.actions.click_button('show-prompt', wait=False)", "a prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False) sst.actions.dismiss_alert(text_to_write='Entered text') sst.actions.assert_title('Page with JavaScript", "'Entered text') sst.actions.accept_alert('Entered text') # Enter text to a prompt box and dismiss", "sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a prompt box, accept it and assert", "alert box and assert its text. 
sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with", "Dismiss a confirm box and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text')", "its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to", "a confirm box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box", "wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box and assert its text.", "text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a", "box and assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript", "alerts by design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') #", "do alerts by design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts')", "import sst.actions from sst import config # PhantomJS can not do alerts by", "Enter text to a prompt box, accept it and assert its text. sst.actions.click_button('show-prompt',", "sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box and assert its", "% sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box and assert its text. 
sst.actions.click_button('show-alert',", "text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter text to a prompt box and", "confirm box and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed')", "its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter", "to a prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False) sst.actions.dismiss_alert(text_to_write='Entered text') sst.actions.assert_title('Page with", "'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box and assert", "wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts') # Accept a confirm box.", "if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert", "Enter text to a prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False) sst.actions.dismiss_alert(text_to_write='Entered text')", "config # PhantomJS can not do alerts by design if config.browser_type == 'phantomjs':", "prompt text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter text to a prompt box", "PhantomJS can not do alerts by design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/'", "assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts') #", "# PhantomJS can not do alerts by design if config.browser_type == 'phantomjs': sst.actions.skip()", "confirm box. 
sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box and", "design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an", "sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a prompt", "# Accept an alert box and assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert", "text') sst.actions.accept_alert('Entered text') # Enter text to a prompt box and dismiss it.", "sst.actions.go_to('/alerts') # Accept an alert box and assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript", "can not do alerts by design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' %", "# Accept a confirm box. sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a", "sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter text to", "assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text", "text') sst.actions.assert_title('Page with JavaScript alerts') # Accept a confirm box. 
sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert()", "sst.actions.click_button('show-confirm', wait=False) sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box and assert its", "text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a prompt box, accept it and", "sst.actions from sst import config # PhantomJS can not do alerts by design", "its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts') # Accept", "sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter text to a prompt", "sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts') # Accept a confirm box. sst.actions.click_button('show-confirm',", "text') # Enter text to a prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False)", "by design if config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept", "and assert its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text')", "text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text') # Enter text", "config.browser_type == 'phantomjs': sst.actions.skip() sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box", "sst import sst.actions from sst import config # PhantomJS can not do alerts", "assert its text. 
sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text') sst.actions.accept_alert('Entered text') #", "import config # PhantomJS can not do alerts by design if config.browser_type ==", "accepted') # Dismiss a confirm box and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript", "sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT) sst.actions.go_to('/alerts') # Accept an alert box and assert its text.", "sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm dismissed') # Enter text to a prompt box, accept", "alert text') sst.actions.assert_title('Page with JavaScript alerts') # Accept a confirm box. sst.actions.click_button('show-confirm', wait=False)", "text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text') sst.actions.assert_title('Page with JavaScript alerts') # Accept a", "a confirm box and assert its text. sst.actions.click_button('show-confirm', wait=False) sst.actions.dismiss_alert(u'JavaScript confirm text') sst.actions.accept_alert(u'Confirm", "a prompt box, accept it and assert its text. sst.actions.click_button('show-prompt', wait=False) sst.actions.accept_alert(u'JavaScript prompt", "prompt box and dismiss it. sst.actions.click_button('show-prompt', wait=False) sst.actions.dismiss_alert(text_to_write='Entered text') sst.actions.assert_title('Page with JavaScript alerts')", "Accept an alert box and assert its text. sst.actions.click_button('show-alert', wait=False) sst.actions.accept_alert(u'JavaScript alert text')", "sst.actions.accept_alert() sst.actions.accept_alert(u'Confirm accepted') # Dismiss a confirm box and assert its text. sst.actions.click_button('show-confirm'," ]
[ "result = np.roll(result, num_roll, axis=-1) result = result[:, :num_result] else: half_way = int(num_result", "* ts - ts * np.sin(ts * np.pi) elif ease == \"out\": ts", "result.squeeze() result = xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in", "= ( (363 / 40.0 * ts[index1] * ts[index1]) - (99 / 10.0", "num_steps - num_steps) result = ( pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, num_steps),", "= np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] = 0 # temporarily", "* ts[~index] * ts[~index] + 1 ) return ts def _sine(self, ts, ease):", "is_xarray: if \"state\" not in da.dims: return da_origin ( da, name, dims, coords,", "(4 * ts[~index]) - 1 return ts def _cubic(self, ts, ease): if ease", "1: result_back = result[::-1] else: result_back = result[:, ::-1] if name == \"duration\"", "= result[::-1] else: result_back = result[:, ::-1] if name == \"duration\" and self.revert", "int(np.ceil(100 / num_states)) else: num_steps = self.frames with param.edit_constant(self): self.num_steps = num_steps num_result", "num_steps, num_items, num_result, name): if is_str(array): fill = \"\" dtype = np.object else:", "= 1 - (ts * ts * ts - ts * np.sin(ts *", "= param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames between each", "- 1) * (1 - ts) + 1 elif ease == \"in_out\": index", "ts[~index] = 0 ts[index] = np.power(2, 10 * (ts[index] - 1)) elif ease", "\"batch\", \"state\", ...) else: da = da.transpose(item_dim, \"state\", ...) 
break dims = da.dims", "to the initial state, and \" \"rollback is like traceback, but disregards the", "cmap(np.arange(num_result))]) result = np.array(results) return result def _interp_text(self, array, num_states, num_steps, num_result): result", "!= 0: result = result[:, :-1] return result def _interp_time( self, array, conversion,", "ts < 0.5 ts[index] = 4 * ts[index] * ts[index] * ts[index] ts[~index]", "np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array,", "+ 1 return ts def _quartic(self, ts, ease): if ease == \"in\": ts", "ts elif ease == \"out\": ts = (ts - 1) * (ts -", "def _interp_text(self, array, num_states, num_steps, num_result): result = np.repeat(array, num_steps, axis=-1) num_roll =", "elif ease == \"out\": ts = np.sin(-13 * np.pi / 2 * (ts", "(99 / 10.0 * ts[index1]) + 17 / 5.0 ) ts[index2] = (", "num_steps = int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100 / num_states)) else: num_steps", "result.values return result def _interp_color(self, array, num_result): results = [] for colors in", "ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) + 10) + 1 return", "(ts < 0.5) & (ts != 1) index1 = (ts != 0) &", "== \"out\": ts = np.sin(ts * np.pi / 2) elif ease == \"in_out\":", "is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp, ease,", "!= 1) ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) - 10) ts[index1]", "if \"state\" not in da.dims: return da_origin ( da, name, dims, coords, interp,", "interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps,", "ts[index] = 0.5 * ( ts[index] * ts[index] * ts[index] - ts[index] *", "= self._interp_numeric(array, *interp_args) elif name in \"c\": # must be after number result", "like traceback, but disregards the 
\" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states =", "result.ndim == 1: result_back = result[::-1] else: result_back = result[:, ::-1] if name", "\"boomerang finds the shortest path to the initial state, \" \"traceback backtracks the", "num_roll, axis=-1) result = result[:, :num_result] else: half_way = int(num_result / 2) result", "/ 2 * ((2 * ts[~index] - 1) + 1)) * np.power(2, -10", "- 1) + 1)) * np.power(2, -10 * (2 * ts[~index] - 1))", "index = ts < 0.5 ts[index] = 4 * ts[index] * ts[index] *", "replace nans return result def _linear(self, ts, ease): return ts def _quadratic(self, ts,", "ts[index] * ts[index] * ts[index] ts[~index] = 2 * ts[~index] - 2 ts[~index]", "10) & ~index1 & ~index0 index3 = ts >= 9 / 10 ts[index0]", "not in da.dims: return da_origin ( da, name, dims, coords, interp, ease, is_bar,", "np.pi / 2) + 1 elif ease == \"out\": ts = np.sin(ts *", "result, da, dims, coords): if len(dims) == 1: result = result.squeeze() result =", "initial state; \" \"boomerang finds the shortest path to the initial state, \"", "ts[~index] - 2 ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index] +", "== \"central_longitude\": interp = \"linear\" result = self._interp_numeric(array, *interp_args) elif name in \"c\":", "+ 1)) * np.power(2, -10 * ts) + 1 elif ease == \"in_out\":", "np.nan # replace nans return result def _linear(self, ts, ease): return ts def", "ts[index0] = 121 * ts[index0] * ts[index0] / 16 ts[index1] = ( (363", "(2 * ts[~index]) - 2 ts[~index] = ( 0.5 * ts[~index] * ts[~index]", "* ts[index] * ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = (2", "\" \"boomerang finds the shortest path to the initial state, \" \"traceback backtracks", "(2 * ts[~index] - 1) ts[~index] = ( 0.5 * ( 1 -", "name = da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in da.dims:", "= np.array(da) if array.ndim == 1: array = array[np.newaxis, :] if self.revert ==", "= array.dtype if name in 
[\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result", "init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] = 0 #", "* np.sin(ts * np.pi)) elif ease == \"in_out\": index = ts < 0.5", ":] result = np.hstack([result, result_back]) return result def _rebuild_da(self, result, da, dims, coords):", "\"in\": ts = ts * ts * ts - ts * np.sin(ts *", "10 * (ts - 1)) elif ease == \"out\": ts = np.sin(-13 *", "np.linspace(0, 1, num_steps) interp_args = (steps, interp, ease, num_states, num_steps, num_items) array_dtype =", "da): name = da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in", "name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result", ".configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util import is_str class Easing(param.Parameterized):", "if ease == \"in\": ts = ts * ts * ts * ts", "num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states * 2)) if num_states > 2:", "== \"in\": ts = ts * ts * ts * ts elif ease", "# (1, num_states) return result def _interp_fill(self, array, num_states, num_steps, name): indices =", "dims, coords, interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da) num_items,", "results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result = np.array(results) return result def _interp_text(self, array,", "ease): if ease == \"in\": index = ts != 0 ts[~index] = 0", "from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util import is_str class", "!= 0) & (ts < 0.5) & (ts != 1) index1 = (ts", "5.0 * ts[index3] * ts[index3]) - (513 / 25.0 * ts[index3]) + 268", "= 0.5 * ( ts[index] * ts[index] * ts[index] - ts[index] * np.sin(ts[index]", "-10 * ts) + 1 elif ease == \"in_out\": index = ts <", "is_errorbar_morph: 
result = self._interp_first( array, num_states, num_steps, num_items, num_result, name ) elif interp", "10: num_steps = int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100 / num_states)) else:", "ts = 1 - ts elif ease == \"out\": pass elif ease ==", "ts) * ts) elif ease == \"in_out\": index = ts < 0.5 ts[index]", "result def _interp_first(self, array, num_states, num_steps, num_items, num_result, name): if is_str(array): fill =", "ts[index1]) + 17 / 5.0 ) ts[index2] = ( (4356 / 361.0 *", "= 0.0 dtype = None result = np.full((num_items, num_result), fill, dtype=dtype) indices =", "- (35442 / 1805.0 * ts[index2]) + 16061 / 1805.0 ) ts[index3] =", "if self.frames is None: if num_states < 10: num_steps = int(np.ceil(60 / num_states))", "\"in_out\": index = ts < 0.5 ts[index] = 2 * ts[index] ts[index] =", "init[init_nans] = 0 # temporarily fill the nans stop = np.repeat(array[:, 1:], num_steps,", "\"out\": pass elif ease == \"in_out\": ts[index] = 0.5 * (1 - ts[index])", "): result = self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array,", ") ) + 0.5 ) return ts def _bounce(self, ts, ease): index =", "result = stop * weights + init * (1 - weights) result[init_nans |", "ts[index3]) + 268 / 25.0 ) if ease == \"in\": ts = 1", "* ts[index] ts[~index] = 2 * ts[~index] - 2 ts[~index] = 0.5 *", "= self._interp_first( array, num_states, num_steps, num_items, num_result, name ) elif interp == \"fill\"", "= -(ts * (ts - 2)) elif ease == \"in_out\": index = ts", "if ease == \"in\": ts = 1 - ts elif ease == \"out\":", "*interp_args) elif name in \"c\": # must be after number result = self._interp_color(array,", "= np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return result def", "ease): index = ts < 0.5 if ease == \"in\": ts = 1", "name): if is_str(array): fill = \"\" dtype = np.object else: fill = 0.0", ") .T.reindex(indices) .T ) if not 
name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] =", "is like traceback, but disregards the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states", "1 - np.sqrt(1 - (ts * ts)) elif ease == \"out\": ts =", "result[init_nans | stop_nans] = np.nan # replace nans return result def _linear(self, ts,", "self.frames is None: if num_states < 10: num_steps = int(np.ceil(60 / num_states)) else:", "ts < 0.5 if ease == \"in\": ts = 1 - ts elif", "\"in_out\": index = ts < 0.5 ts[index] = 0.5 * (1 - np.sqrt(1", "1 elif ease == \"in_out\": index = ts < 0.5 ts[index] = 4", "1)) + 1 ) return ts def _exponential(self, ts, ease): if ease ==", "ts[index] ts[~index] = ts[~index] - 1 ts[~index] = -8 * ts[~index] * ts[~index]", "* weights + init * (1 - weights) result[init_nans | stop_nans] = np.nan", "num_result): result = np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states * 2))", "\"grid_x\"]}) elif \"batch\" in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\",", "ease == \"in\": ts = np.sin(13 * np.pi / 2 * ts) *", "da, dims, coords): if len(dims) == 1: result = result.squeeze() result = xr.DataArray(", "ts[~index] = (-2 * ts[~index] * ts[~index]) + (4 * ts[~index]) - 1", "* np.pi / 2) elif ease == \"in_out\": ts = 0.5 * (1", "268 / 25.0 ) if ease == \"in\": ts = 1 - ts", "\"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da,", "np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return result def _rebuild_da(self,", "= \"\" dtype = np.object else: fill = 0.0 dtype = None result", "== \"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back])", ") def __init__(self, **kwds): super().__init__(**kwds) def 
interpolate(self, da, name=\"\"): interp = self.interp or", "= da.transpose(item_dim, \"state\", ...) break dims = da.dims if da.ndim > 2: #", "weights) result[init_nans | stop_nans] = np.nan # replace nans return result def _linear(self,", "of frames between each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def", "indices = np.arange(num_states * num_steps - num_steps) result = ( pd.DataFrame( array, columns=np.arange(0,", "!= 0 ts[~index] = 0 ts[index] = np.power(2, 10 * (ts[index] - 1))", "25.0 ) if ease == \"in\": ts = 1 - ts elif ease", "1) * (ts - 1) * (ts - 1) + 1 elif ease", "- 1) * (ts - 1) + 1 elif ease == \"in_out\": index", "- 3) * ((2 * ts[~index]) - 1)) + 1 ) return ts", "interp_args = (steps, interp, ease, num_states, num_steps, num_items) array_dtype = array.dtype if name", "num_items ): array = array.astype(float) result = self._interp_numeric( array, steps, interp, ease, num_states,", "ts = np.sqrt((2 - ts) * ts) elif ease == \"in_out\": index =", "+ 1)) * np.power(2, -10 * (2 * ts[~index] - 1)) + 2", "elif ease == \"out\": ts = np.sqrt((2 - ts) * ts) elif ease", "the initial state; \" \"boomerang finds the shortest path to the initial state,", "* ts[~index] * ts[~index]) + (4 * ts[~index]) - 1 return ts def", "disregards the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\",", "= self._interp_numeric( array, steps, interp, ease, num_states, num_steps, num_items ) result = conversion(result.ravel()).values", "361.0 * ts[index2] * ts[index2]) - (35442 / 1805.0 * ts[index2]) + 16061", "= -int(np.ceil(num_steps / num_states * 2)) if num_states > 2: result = np.roll(result,", "and \" \"rollback is like traceback, but disregards the \" \"original's path durations\",", "initial state, and \" \"rollback is like traceback, but disregards the \" \"original's", "weights + init * (1 - weights) result[init_nans | stop_nans] = np.nan #", 
"0.5 * (1 - ts[index]) ts[~index] = 0.5 * ts[~index] + 0.5 return", "_interp_numeric( self, array, steps, interp, ease, num_states, num_steps, num_items ): init = np.repeat(array[:,", "def _quartic(self, ts, ease): if ease == \"in\": ts = ts * ts", "str result = self._interp_text(array, num_states, num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]: result", "num_result = self._calc_shapes(array) if (num_steps == 1 or num_states == 1) and self.revert", "= np.sqrt((2 - ts) * ts) elif ease == \"in_out\": index = ts", "return da_origin ( da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph, ) =", "4 * ts[index] * ts[index] * ts[index] ts[~index] = 2 * ts[~index] -", "- 1) * np.pi / 2) + 1 elif ease == \"out\": ts", "= stop * weights + init * (1 - weights) result[init_nans | stop_nans]", "0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1", "index0 = (ts != 0) & (ts < 0.5) & (ts != 1)", "finds the shortest path to the initial state, \" \"traceback backtracks the original", "(-2 * ts[~index] * ts[~index]) + (4 * ts[~index]) - 1 return ts", "* ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1]) + 17 /", "return ts def _sine(self, ts, ease): if ease == \"in\": ts = np.sin((ts", "ts[index0] * ts[index0] / 16 ts[index1] = ( (363 / 40.0 * ts[index1]", ") return ts def _bounce(self, ts, ease): index = ts < 0.5 if", "| stop_nans] = np.nan # replace nans return result def _linear(self, ts, ease):", "* ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 return ts def", "ts - ts * np.sin(ts * np.pi) elif ease == \"out\": ts =", "= self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps", "np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] = 0 # temporarily fill", "== \"in\": ts = np.sin((ts - 1) * np.pi / 2) + 1", "* ts elif ease == \"out\": ts = -(ts * (ts - 2))", "dtype = np.object else: fill = 0.0 dtype = 
None result = np.full((num_items,", "np.pi / 2 * ts) * np.power(2, 10 * (ts - 1)) elif", "* (ts[index] * ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2 * ts[~index]) -", "= 1 ts[index] = 1 - np.power(2, -10 * ts[index]) elif ease ==", "return ts def _bounce(self, ts, ease): index = ts < 0.5 if ease", "== \"in_out\": index = ts < 0.5 ts[index] = 8 * ts[index] *", "+ init * (1 - weights) result[init_nans | stop_nans] = np.nan # replace", "ease == \"in\": ts = ts * ts * ts elif ease ==", "0.5 * (1 - np.sqrt(1 - 4 * (ts[index] * ts[index]))) ts[~index] =", "10) + 1 return ts def _elastic(self, ts, ease): if ease == \"in\":", ") if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else:", "- ts elif ease == \"out\": pass elif ease == \"in_out\": ts[index] =", "result = result.values return result def _interp_color(self, array, num_result): results = [] for", "* ts * ts elif ease == \"out\": ts = (ts - 1)", "\"in\": ts = ts * ts * ts * ts elif ease ==", "= int(np.ceil(100 / num_states)) else: num_steps = self.frames with param.edit_constant(self): self.num_steps = num_steps", "da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\" in item_dim: if", "def _quintic(self, ts, ease): if ease == \"in\": ts = ts * ts", "interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector(", "elif ease == \"in_out\": index = ts < 0.5 ts[index] = 4 *", "= 0.5 * (1 - ts[index]) ts[~index] = 0.5 * ts[~index] + 0.5", "ts != 1 ts[~index] = 1 ts[index] = 1 - np.power(2, -10 *", "- ts) * ts) elif ease == \"in_out\": index = ts < 0.5", "\"in_out\": index = ts < 0.5 ts[index] = 16 * ts[index] * ts[index]", "da.ndim > 2: # more than (item, state) if \"grid_item\" in dims: da", "= da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da = 
da.stack({\"stacked\": [item_dim,", "np.pi)) return ts def _circular(self, ts, ease): if ease == \"in\": ts =", "self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar: if name == \"central_longitude\":", "\"yerr\"] and not is_errorbar_morph: result = self._interp_first( array, num_states, num_steps, num_items, num_result, name", "is None: return da_origin steps = np.linspace(0, 1, num_steps) interp_args = (steps, interp,", "result = result[:, :num_result] else: half_way = int(num_result / 2) result = result[:,", "def _sine(self, ts, ease): if ease == \"in\": ts = np.sin((ts - 1)", "ts = 1 - np.sqrt(1 - (ts * ts)) elif ease == \"out\":", "np.sin((ts - 1) * np.pi / 2) + 1 elif ease == \"out\":", "stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape( num_items, -1", "default=None, objects=REVERTS, doc=\"Method for reverting to the initial state; \" \"boomerang finds the", "result = self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime,", "original path to the initial state, and \" \"rollback is like traceback, but", "= da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar = False if is_xarray: if \"state\"", "* ts[index2]) + 16061 / 1805.0 ) ts[index3] = ( (54 / 5.0", "ts * ts * ts - ts * np.sin(ts * np.pi) elif ease", "= 2 * ts[index] * ts[index] ts[~index] = (-2 * ts[~index] * ts[~index])", "doc=\"Number of frames between each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds)", "* (ts - 1) * (ts - 1) + 1 elif ease ==", "is_bar = False if is_xarray: if \"state\" not in da.dims: return da_origin (", "num_steps, num_items ): array = array.astype(float) result = self._interp_numeric( array, steps, interp, ease,", "isinstance(da, xr.DataArray) is_bar = False if is_xarray: if \"state\" not in da.dims: return", "or \"cubic\" ease = 
self.ease da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar", "ts[index] * ts[index] * ts[index] ts[~index] = (2 * ts[~index]) - 2 ts[~index]", "ease == \"out\": index = ts != 1 ts[~index] = 1 ts[index] =", "False if is_xarray: if \"state\" not in da.dims: return da_origin ( da, name,", "self._interp_text(array, num_states, num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name)", ") revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to the initial state;", "array, columns=np.arange(0, num_states * num_steps, num_steps), ) .T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"):", "frames between each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self,", "ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index] * np.pi) ) ) + 0.5", "== 1: array = array[np.newaxis, :] if self.revert == \"boomerang\": array = np.hstack([array,", "ts[index2]) + 16061 / 1805.0 ) ts[index3] = ( (54 / 5.0 *", "< 8 / 11) & ~index0 index2 = (ts < 9 / 10)", "ts[~index] = ( 0.5 * ( 1 - ( ts[~index] * ts[~index] *", "1) * np.pi / 2) + 1 elif ease == \"out\": ts =", "...) else: da = da.transpose(item_dim, \"state\", ...) 
break dims = da.dims if da.ndim", "if ease == \"in\": ts = ts * ts * ts - ts", "* ts[~index]) - 2 ts[~index] = ( 0.5 * ts[~index] * ts[~index] *", "* ts[index] * ts[index] ts[~index] = (-2 * ts[~index] * ts[~index]) + (4", "- np.power(2, -10 * ts[index]) elif ease == \"in_out\": index0 = (ts !=", "array.dtype if name in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result =", "= 16 * ts[index] * ts[index] * ts[index] * ts[index] * ts[index] ts[~index]", "= 2 * ts[~index] - 2 ts[~index] = 0.5 * ts[~index] * ts[~index]", "0.5 ts[index] = ( 0.5 * np.sin(13 * np.pi / 2 * (2", "2 * ts[index] ts[index] = 0.5 * ( ts[index] * ts[index] * ts[index]", "index2 = (ts < 9 / 10) & ~index1 & ~index0 index3 =", "= result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else: result = result.values return result", "result = xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims:", "in cmap(np.arange(num_result))]) result = np.array(results) return result def _interp_text(self, array, num_states, num_steps, num_result):", "* num_steps, num_steps), ) .T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values", "if self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if is_xarray: result =", "(steps, interp, ease, num_states, num_steps, num_items) array_dtype = array.dtype if name in [\"duration\",", "not is_errorbar_morph: result = self._interp_first( array, num_states, num_steps, num_items, num_result, name ) elif", "array = self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps == 1", "result = self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result, da, dims, coords) return", "ease == \"out\": ts = np.sin(ts * np.pi / 2) elif ease ==", "== \"in_out\": index0 = (ts != 0) & (ts < 0.5) & (ts", "interp, ease, num_states, 
num_steps, num_items ): init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans", "elif ease == \"out\": ts = np.sin(ts * np.pi / 2) elif ease", "11) & ~index0 index2 = (ts < 9 / 10) & ~index1 &", "\"in_out\": index = ts < 0.5 ts[index] = 2 * ts[index] * ts[index]", "None: if num_states < 10: num_steps = int(np.ceil(60 / num_states)) else: num_steps =", "== \"in_out\": index = ts < 0.5 ts[index] = ( 0.5 * np.sin(13", "(ts >= 0.5) & (ts != 1) ts[index0] = 0.5 * np.power(2, (20", "return ts def _circular(self, ts, ease): if ease == \"in\": ts = 1", "== \"in_out\": index = ts < 0.5 ts[index] = 2 * ts[index] ts[index]", "result = result[:, :-1] return result def _interp_time( self, array, conversion, steps, interp,", "\"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64):", "1 ts[~index] = 1 ts[index] = 1 - np.power(2, -10 * ts[index]) elif", "name) else: # str result = self._interp_text(array, num_states, num_steps, num_result) if self.revert in", "= da.dims if da.ndim > 2: # more than (item, state) if \"grid_item\"", "\"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da =", "ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 return ts def _quintic(self,", "2) + 1 elif ease == \"out\": ts = np.sin(ts * np.pi /", "index3 = ts >= 9 / 10 ts[index0] = 121 * ts[index0] *", "np.power(2, 10 * (ts - 1)) elif ease == \"out\": ts = np.sin(-13", "= self.interp or \"cubic\" ease = self.ease da_origin = da.copy() is_xarray = isinstance(da,", "1 ) return ts def _sine(self, ts, ease): if ease == \"in\": ts", "init * (1 - weights) result[init_nans | stop_nans] = np.nan # replace nans", "ts[~index] - 1 ts[~index] = -8 * ts[~index] * ts[~index] * ts[~index] *", "+ 2 ) return ts def _back(self, ts, ease): if ease == \"in\":", "num_steps indices[-1] -= 1 result[:, indices] = array # (1, num_states) return result", "state, 
\" \"traceback backtracks the original path to the initial state, and \"", "dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\",", "colors, N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result = np.array(results) return result def", "interp = self.interp or \"cubic\" ease = self.ease da_origin = da.copy() is_xarray =", "( 0.5 * ( 1 - ( ts[~index] * ts[~index] * ts[~index] -", "result def _prep_xarray(self, da): name = da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\")", "-1] = array[:, -1] else: result = result.values return result def _interp_color(self, array,", "+ 17 / 5.0 ) ts[index2] = ( (4356 / 361.0 * ts[index2]", "with param.edit_constant(self): self.num_steps = num_steps num_result = (num_states - 1) * num_steps return", "def _calc_shapes(self, array): num_items, num_states = array.shape if self.frames is None: if num_states", "== \"in_out\": index = ts < 0.5 ts[index] = 0.5 * (1 -", "fill = 0.0 dtype = None result = np.full((num_items, num_result), fill, dtype=dtype) indices", "1 - ( ts[~index] * ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index] *", "EASES, INTERPS, PRECEDENCES, REVERTS from .util import is_str class Easing(param.Parameterized): interp = param.ClassSelector(", "& ~index0 index3 = ts >= 9 / 10 ts[index0] = 121 *", "indices] = array # (1, num_states) return result def _interp_fill(self, array, num_states, num_steps,", "ts) * np.power(2, 10 * (ts - 1)) elif ease == \"out\": ts", "ts < 0.5 ts[index] = 8 * ts[index] * ts[index] * ts[index] *", "num_result), fill, dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1] -= 1 result[:, indices]", "= np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states -", "== \"in_out\": index = ts < 0.5 ts[index] = 4 * ts[index] *", "1 elif ease == \"in_out\": index = ts < 0.5 ts[index] = (", "- 1) 
* (ts - 1) * (1 - ts) + 1 elif", "-10 * (2 * ts[~index] - 1)) + 2 ) return ts def", "\"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims:", "* np.pi / 2) + 1 elif ease == \"out\": ts = np.sin(ts", "- 10) ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) + 10) +", "return result def _rebuild_da(self, result, da, dims, coords): if len(dims) == 1: result", "+ 0.5 ) return ts def _bounce(self, ts, ease): index = ts <", "steps, interp, ease, num_states, num_steps, num_items ) result = conversion(result.ravel()).values result = result.reshape(num_items,", "= xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims: result", "state, and \" \"rollback is like traceback, but disregards the \" \"original's path", "axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape( num_items,", "array): num_items, num_states = array.shape if self.frames is None: if num_states < 10:", "self.num_steps = num_steps num_result = (num_states - 1) * num_steps return num_items, num_states,", "/ 361.0 * ts[index2] * ts[index2]) - (35442 / 1805.0 * ts[index2]) +", "np.sqrt((2 - ts) * ts) elif ease == \"in_out\": index = ts <", "num_steps, num_items) array_dtype = array.dtype if name in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and", "= ts < 0.5 ts[index] = ( 0.5 * np.sin(13 * np.pi /", "1 - np.power(2, -10 * ts[index]) elif ease == \"in_out\": index0 = (ts", "ts, ease): if ease == \"in\": ts = np.sin((ts - 1) * np.pi", "= num_steps num_result = (num_states - 1) * num_steps return num_items, num_states, num_steps,", "* ts[~index] + 1 ) return ts def _sine(self, ts, ease): if ease", "(ts * ts * ts - ts * np.sin(ts * np.pi)) elif ease", "1 - (2 * ts[~index] - 1) ts[~index] = ( 0.5 * (", "& ~index1 & ~index0 index3 = ts >= 9 / 10 ts[index0] =", "(513 / 25.0 * ts[index3]) + 268 / 25.0 ) if ease ==", "== \"in_out\": ts = 0.5 * (1 - 
np.cos(ts * np.pi)) return ts", "ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) - 10) ts[index1] = -0.5", "( ts[~index] * ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index] * np.pi) )", "matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from", "= da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\" in item_dim:", "ease == \"out\": ts = -(ts * (ts - 2)) elif ease ==", "ease == \"in_out\": index = ts < 0.5 ts[index] = 2 * ts[index]", "name in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result = self._interp_first( array,", "of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1, None), doc=\"Number of", "\"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype,", "param import xarray as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import", "num_states, num_steps, name): indices = np.arange(num_states * num_steps - num_steps) result = (", "num_items).reshape( num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop *", "if ease == \"in\": ts = ts * ts elif ease == \"out\":", "ts def _quadratic(self, ts, ease): if ease == \"in\": ts = ts *", "ease == \"in_out\": index = ts < 0.5 ts[index] = 16 * ts[index]", "(ts != 0) & (ts < 0.5) & (ts != 1) index1 =", "def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp = self.interp or \"cubic\"", "shortest path to the initial state, \" \"traceback backtracks the original path to", "1) * (ts - 1) * (1 - ts) + 1 elif ease", "index1 = (ts < 8 / 11) & ~index0 index2 = (ts <", "(num_steps == 1 or num_states == 1) and self.revert is None: return da_origin", "ts[~index] = 1 ts[index] = 1 - 
np.power(2, -10 * ts[index]) elif ease", "must be after number result = self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array,", "_back(self, ts, ease): if ease == \"in\": ts = ts * ts *", "!= 1 ts[~index] = 1 ts[index] = 1 - np.power(2, -10 * ts[index])", "( (363 / 40.0 * ts[index1] * ts[index1]) - (99 / 10.0 *", "ts[~index] = 2 * ts[~index] - 2 ts[~index] = 0.5 * ts[~index] *", "ts ts = 1 - (ts * ts * ts - ts *", "* ts[~index]) - 1 return ts def _cubic(self, ts, ease): if ease ==", "in result.dims: result = result.unstack().transpose(*dims) return result def _interp_first(self, array, num_states, num_steps, num_items,", "break dims = da.dims if da.ndim > 2: # more than (item, state)", "ts[~index] = ts[~index] - 1 ts[~index] = -8 * ts[~index] * ts[~index] *", "* ts[index] * ts[index] * ts[index] ts[~index] = (2 * ts[~index]) - 2", "= 0 ts[index] = np.power(2, 10 * (ts[index] - 1)) elif ease ==", "def _quadratic(self, ts, ease): if ease == \"in\": ts = ts * ts", "num_steps, num_steps), ) .T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:,", "...) 
break dims = da.dims if da.ndim > 2: # more than (item,", "= False if is_xarray: if \"state\" not in da.dims: return da_origin ( da,", "class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease", "num_items, num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps == 1 or num_states ==", "num_states * 2)) if num_states > 2: result = np.roll(result, num_roll, axis=-1) result", "ts, ease): index = ts < 0.5 if ease == \"in\": ts =", "* np.sin(13 * np.pi / 2 * (2 * ts[index])) * np.power(2, 10", "self.interp or \"cubic\" ease = self.ease da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray)", "(ts - 1) * (ts - 1) * (1 - ts) + 1", "1)) * np.power(2, -10 * ts) + 1 elif ease == \"in_out\": index", "= da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\" in item_dim: if \"batch\" in", "def _interp_time( self, array, conversion, steps, interp, ease, num_states, num_steps, num_items ): array", "path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer(", "< 0.5 ts[index] = 8 * ts[index] * ts[index] * ts[index] * ts[index]", "* np.power(2, (-20 * ts[index1]) + 10) + 1 return ts def _elastic(self,", "= self._rebuild_da(result, da, dims, coords) return result def _prep_xarray(self, da): name = da.name", "ease == \"in\": ts = ts * ts elif ease == \"out\": ts", "1)) * np.power(2, -10 * (2 * ts[~index] - 1)) + 2 )", "**kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp = self.interp or \"cubic\" ease =", "!= 1) index1 = (ts != 0) & (ts >= 0.5) & (ts", "< 0.5 if ease == \"in\": ts = 1 - ts elif ease", "np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index]) - 1)) + 1", "param.Integer( default=None, bounds=(1, None), doc=\"Number of frames between each base state\", precedence=PRECEDENCES[\"interp\"], )", 
"num_steps, num_result): result = np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states *", "\"in_out\": index0 = (ts != 0) & (ts < 0.5) & (ts !=", "path to the initial state, and \" \"rollback is like traceback, but disregards", "= self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result, da, dims, coords) return result", "1 elif ease == \"in_out\": index = ts < 0.5 ts[index] = 16", "param.Integer( doc=\"Number of frames between each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds):", "* ts[index1]) + 10) + 1 return ts def _elastic(self, ts, ease): if", "result = self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array, num_states, num_steps, name) else:", "ease, is_bar, is_errorbar_morph def _prep_array(self, da): array = np.array(da) if array.ndim == 1:", "): init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] = 0", "self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps ==", "ease = self.ease da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar = False", "2) ts[~index] = ts[~index] * 2 - 1 index0 = ts < 4", ") ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames", "= (-2 * ts[~index] * ts[~index]) + (4 * ts[~index]) - 1 return", "= result.values return result def _interp_color(self, array, num_result): results = [] for colors", "1 elif ease == \"in_out\": index = ts < 0.5 ts[index] = 8", "is_xarray: result = self._rebuild_da(result, da, dims, coords) return result def _prep_xarray(self, da): name", "elif \"batch\" in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\")", "LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in 
cmap(np.arange(num_result))]) result = np.array(results) return result", "= np.arange(num_states) * num_steps indices[-1] -= 1 result[:, indices] = array # (1,", "3) * ((2 * ts[~index]) - 1)) + 1 ) return ts def", "np.sin(ts * np.pi / 2) elif ease == \"in_out\": ts = 0.5 *", "_prep_array(self, da): array = np.array(da) if array.ndim == 1: array = array[np.newaxis, :]", "= self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array, num_states, num_steps, name) else: #", "ts < 0.5 ts[index] = 2 * ts[index] ts[index] = 0.5 * (", "coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name,", "ts[index1] = ( (363 / 40.0 * ts[index1] * ts[index1]) - (99 /", "int(num_result / 2) result = result[:, half_way:-half_way] if num_steps % 2 != 0:", "_interp_fill(self, array, num_states, num_steps, name): indices = np.arange(num_states * num_steps - num_steps) result", "axis=-1) init_nans = np.isnan(init) init[init_nans] = 0 # temporarily fill the nans stop", "# str result = self._interp_text(array, num_states, num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]:", "* ts[index] ts[~index] = (-2 * ts[~index] * ts[~index]) + (4 * ts[~index])", "ts, ease): if ease == \"in\": ts = ts * ts * ts", "states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames between each base state\", **DEFAULTS[\"num_kwds\"]", "= int(num_result / 2) result = result[:, half_way:-half_way] if num_steps % 2 !=", "weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights + init *", "columns=np.arange(0, num_states * num_steps, num_steps), ) .T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"): result", "fill the nans stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps", "16061 / 1805.0 ) ts[index3] = ( (54 / 5.0 * ts[index3] *", "ts, ease): return ts 
def _quadratic(self, ts, ease): if ease == \"in\": ts", "array, num_states, num_steps, num_result): result = np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps /", "1805.0 ) ts[index3] = ( (54 / 5.0 * ts[index3] * ts[index3]) -", "num_steps return num_items, num_states, num_steps, num_result def _apply_revert(self, result, name): if result.ndim ==", "ts[~index]) - 2 ts[~index] = ( 0.5 * ts[~index] * ts[~index] * ts[~index]", "0.5 * ( np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index]) -", "num_steps), ) .T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1]", "result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return result", "else: da = da.transpose(item_dim, \"state\", ...) break dims = da.dims if da.ndim >", "frames = param.Integer( default=None, bounds=(1, None), doc=\"Number of frames between each base state\",", "/ 5.0 ) ts[index2] = ( (4356 / 361.0 * ts[index2] * ts[index2])", "numpy as np import pandas as pd import param import xarray as xr", "& (ts != 1) ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) -", "ts[~index] * ts[~index]) + (4 * ts[~index]) - 1 return ts def _cubic(self,", "(1, num_states) return result def _interp_fill(self, array, num_states, num_steps, name): indices = np.arange(num_states", "ease == \"out\": ts = 1 - ts ts = 1 - (ts", "\"batch\" in da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...) 
else: da = da.transpose(item_dim,", "result def _linear(self, ts, ease): return ts def _quadratic(self, ts, ease): if ease", "_sine(self, ts, ease): if ease == \"in\": ts = np.sin((ts - 1) *", "[] for colors in array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result)", "\" \"traceback backtracks the original path to the initial state, and \" \"rollback", "= da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar", "= [] for colors in array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors,", "0: result = result[:, :-1] return result def _interp_time( self, array, conversion, steps,", "is_xarray = isinstance(da, xr.DataArray) is_bar = False if is_xarray: if \"state\" not in", "* num_steps indices[-1] -= 1 result[:, indices] = array # (1, num_states) return", "= param.Integer( doc=\"Number of frames between each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self,", "> 2: result = np.roll(result, num_roll, axis=-1) result = result[:, :num_result] else: half_way", "~index0 index2 = (ts < 9 / 10) & ~index1 & ~index0 index3", "= (ts != 0) & (ts < 0.5) & (ts != 1) index1", "array = array.astype(float) result = self._interp_numeric( array, steps, interp, ease, num_states, num_steps, num_items", "= (num_states - 1) * num_steps return num_items, num_states, num_steps, num_result def _apply_revert(self,", "conversion(result.ravel()).values result = result.reshape(num_items, -1) return result def _interp_numeric( self, array, steps, interp,", "< 0.5 ts[index] = 2 * ts[index] * ts[index] ts[~index] = (-2 *", "- ts * np.sin(ts * np.pi) elif ease == \"out\": ts = 1", "0.5 ts[index] = 2 * ts[index] ts[index] = 0.5 * ( ts[index] *", "* ts[~index] - 1)) + 2 ) return ts def _back(self, ts, ease):", "for item_dim in da.dims: if \"item\" in item_dim: if \"batch\" in da.dims: da", "1) * (ts - 1) * (ts - 1) * (1 - ts)", 
"for colors in array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb)", "[\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result, da, dims,", "result.dims: result = result.unstack().transpose(*dims) return result def _interp_first(self, array, num_states, num_steps, num_items, num_result,", "\"traceback backtracks the original path to the initial state, and \" \"rollback is", "xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims: result =", "== \"out\": ts = -(ts * (ts - 2)) elif ease == \"in_out\":", "-(ts * (ts - 2)) elif ease == \"in_out\": index = ts <", "ts = ts * ts elif ease == \"out\": ts = -(ts *", "if name in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result = self._interp_first(", "= -8 * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 return", "PRECEDENCES, REVERTS from .util import is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable,", "num_states)) else: num_steps = int(np.ceil(100 / num_states)) else: num_steps = self.frames with param.edit_constant(self):", "param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to the initial state; \" \"boomerang finds", "* ts[~index] * ts[~index] * ts[~index] + 1 return ts def _quintic(self, ts,", "\"in_out\": index = ts < 0.5 ts[index] = 8 * ts[index] * ts[index]", "index = ts < 0.5 if ease == \"in\": ts = 1 -", "collections.abc import Iterable import numpy as np import pandas as pd import param", "* np.power(2, -10 * (2 * ts[~index] - 1)) + 2 ) return", "result[:, ::-1] if name == \"duration\" and self.revert == \"rollback\": result_back = np.repeat(1", "elif ease == \"out\": ts = 1 - ts ts = 1 -", "ts[index1]) + 10) + 1 return ts def _elastic(self, ts, ease): if ease", "ts[~index]) - 1 return ts def 
_cubic(self, ts, ease): if ease == \"in\":", "np.power(2, -10 * ts[index]) elif ease == \"in_out\": index0 = (ts != 0)", "import param import xarray as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration", "(ts != 0) & (ts >= 0.5) & (ts != 1) ts[index0] =", "def _interp_numeric( self, array, steps, interp, ease, num_states, num_steps, num_items ): init =", "(ts[index] * ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2 * ts[~index]) - 3)", "* ts) * np.power(2, 10 * (ts - 1)) elif ease == \"out\":", "index = ts < 0.5 ts[index] = 16 * ts[index] * ts[index] *", "== \"in\": ts = ts * ts * ts elif ease == \"out\":", "ts[index] - ts[index] * np.sin(ts[index] * np.pi) ) ts[~index] = 1 - (2", "ts def _elastic(self, ts, ease): if ease == \"in\": ts = np.sin(13 *", "objects=REVERTS, doc=\"Method for reverting to the initial state; \" \"boomerang finds the shortest", "state; \" \"boomerang finds the shortest path to the initial state, \" \"traceback", "== \"out\": index = ts != 1 ts[~index] = 1 ts[index] = 1", "= 1 - (2 * ts[~index] - 1) ts[~index] = ( 0.5 *", "None: return da_origin steps = np.linspace(0, 1, num_steps) interp_args = (steps, interp, ease,", "precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], )", "def _back(self, ts, ease): if ease == \"in\": ts = ts * ts", "indices = np.arange(num_states) * num_steps indices[-1] -= 1 result[:, indices] = array #", "-= 1 result[:, indices] = array # (1, num_states) return result def _interp_fill(self,", "\"stacked\" in result.dims: result = result.unstack().transpose(*dims) return result def _interp_first(self, array, num_states, num_steps,", "num_result): results = [] for colors in array: # item, state cmap =", "= ts * ts * ts * ts elif ease == \"out\": ts", "1: array = array[np.newaxis, :] if self.revert == \"boomerang\": array = np.hstack([array, array[:,", "= 
da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims,", "< 0.5 ts[index] = ( 0.5 * np.sin(13 * np.pi / 2 *", "(ts - 2)) elif ease == \"in_out\": index = ts < 0.5 ts[index]", "array, steps, interp, ease, num_states, num_steps, num_items ) result = conversion(result.ravel()).values result =", "= conversion(result.ravel()).values result = result.reshape(num_items, -1) return result def _interp_numeric( self, array, steps,", "1) * (ts - 1) * (ts - 1) * (ts - 1)", "ts[index3] * ts[index3]) - (513 / 25.0 * ts[index3]) + 268 / 25.0", "None result = np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1]", "ease == \"in\": ts = 1 - np.sqrt(1 - (ts * ts)) elif", "\"in\": ts = ts * ts * ts * ts * ts elif", "ts def _quartic(self, ts, ease): if ease == \"in\": ts = ts *", "== \"in_out\": index = ts < 0.5 ts[index] = 16 * ts[index] *", "errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp,", "_interp_color(self, array, num_result): results = [] for colors in array: # item, state", "easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1, None), doc=\"Number of frames", "num_items ): init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] =", "2 ts[~index] = ( 0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index]", "* ts[index]) - 1)) ) ts[~index] = 0.5 * ( np.sin(-13 * np.pi", "of frames between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS,", "= self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar: if name ==", "ts[~index] * ts[~index] + 1 return ts def _quartic(self, ts, ease): if ease", "result = 
self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar: if name", "> 2: # more than (item, state) if \"grid_item\" in dims: da =", "da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph", "(ts - 1) * (ts - 1) * (ts - 1) * (ts", "= 0.5 * np.power(2, (20 * ts[index0]) - 10) ts[index1] = -0.5 *", "index = ts < 0.5 ts[index] = 2 * ts[index] ts[index] = 0.5", "np.sin(-13 * np.pi / 2 * ((2 * ts[~index] - 1) + 1))", "= array[:, -1] else: result = result.values return result def _interp_color(self, array, num_result):", "/ 2 * (2 * ts[index])) * np.power(2, 10 * ((2 * ts[index])", "reverting to the initial state; \" \"boomerang finds the shortest path to the", "in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result = self._interp_first( array, num_states,", "2: result = np.roll(result, num_roll, axis=-1) result = result[:, :num_result] else: half_way =", "import is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"],", "+ 268 / 25.0 ) if ease == \"in\": ts = 1 -", "- 1) + 1 elif ease == \"in_out\": index = ts < 0.5", "_quintic(self, ts, ease): if ease == \"in\": ts = ts * ts *", "(ts - 1) * (1 - ts) + 1 elif ease == \"in_out\":", "coords, interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da) num_items, num_states,", "= array[np.newaxis, :] if self.revert == \"boomerang\": array = np.hstack([array, array[:, :1]]) return", "elif np.issubdtype(array_dtype, np.number) and not is_bar: if name == \"central_longitude\": interp = \"linear\"", "num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights", "1)) elif ease == \"out\": ts = np.sin(-13 * np.pi / 2 *", "np.hstack([result, 
result_back]) return result def _rebuild_da(self, result, da, dims, coords): if len(dims) ==", "_cubic(self, ts, ease): if ease == \"in\": ts = ts * ts *", "* ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index] * np.pi) ) ) +", "num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if is_xarray:", "= (ts - 1) * (ts - 1) * (ts - 1) +", "__init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp = self.interp or \"cubic\" ease", "~index0 index3 = ts >= 9 / 10 ts[index0] = 121 * ts[index0]", "conversion, steps, interp, ease, num_states, num_steps, num_items ): array = array.astype(float) result =", "after number result = self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array, num_states, num_steps,", "2 * (2 * ts[index])) * np.power(2, 10 * ((2 * ts[index]) -", "= 0.5 * ( np.sin(-13 * np.pi / 2 * ((2 * ts[~index]", "* np.sin(ts[~index] * np.pi) ) ) + 0.5 ) return ts def _bounce(self,", "{INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"],", "== \"out\": ts = np.sqrt((2 - ts) * ts) elif ease == \"in_out\":", "(ts != 1) ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) - 10)", "(ts - 1) * (ts - 1) * (ts - 1) * (1", "result def _interp_color(self, array, num_result): results = [] for colors in array: #", "- np.sqrt(1 - (ts * ts)) elif ease == \"out\": ts = np.sqrt((2", "num_steps, num_items, num_result, name ) elif interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\",", "- 1 return ts def _cubic(self, ts, ease): if ease == \"in\": ts", "more than (item, state) if \"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\",", "or num_states == 1) and self.revert is None: return da_origin steps = np.linspace(0,", "ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 ) 
return", "\"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result", "ts[~index] - ts[~index] * np.sin(ts[~index] * np.pi) ) ) + 0.5 ) return", "name=\"\"): interp = self.interp or \"cubic\" ease = self.ease da_origin = da.copy() is_xarray", "np.array(da) if array.ndim == 1: array = array[np.newaxis, :] if self.revert == \"boomerang\":", "result[:, :num_result] else: half_way = int(num_result / 2) result = result[:, half_way:-half_way] if", "\"batch\" in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords", "* ts[index0]) - 10) ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) +", "array, conversion, steps, interp, ease, num_states, num_steps, num_items ): array = array.astype(float) result", "- 1)) + 1 ) return ts def _exponential(self, ts, ease): if ease", "param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\", class_=Iterable,", "ts * ts * ts * ts * ts elif ease == \"out\":", ".T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:,", "result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else: result = result.values return result def", "* num_items).reshape( num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop", "* ts) + 1 elif ease == \"in_out\": index = ts < 0.5", "da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]})", "num_items) array_dtype = array.dtype if name in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and not", "None), doc=\"Number of frames between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector(", 
"self.revert == \"boomerang\": array = np.hstack([array, array[:, :1]]) return array def _calc_shapes(self, array):", "import Iterable import numpy as np import pandas as pd import param import", "= result[:, :num_result] else: half_way = int(num_result / 2) result = result[:, half_way:-half_way]", "ts[index2]) - (35442 / 1805.0 * ts[index2]) + 16061 / 1805.0 ) ts[index3]", "np.issubdtype(array_dtype, np.number) and not is_bar: if name == \"central_longitude\": interp = \"linear\" result", "ts[index] * ts[index] ts[~index] = ts[~index] - 1 ts[~index] = -8 * ts[~index]", "* ts[~index] - ts[~index] * np.sin(ts[~index] * np.pi) ) ) + 0.5 )", "num_states, num_steps, num_items, num_result, name): if is_str(array): fill = \"\" dtype = np.object", "= da.transpose(item_dim, \"batch\", \"state\", ...) else: da = da.transpose(item_dim, \"state\", ...) break dims", "result def _interp_text(self, array, num_states, num_steps, num_result): result = np.repeat(array, num_steps, axis=-1) num_roll", "param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None,", "ts[index] = 0.5 * (1 - ts[index]) ts[~index] = 0.5 * ts[~index] +", "from collections.abc import Iterable import numpy as np import pandas as pd import", "np.tile(steps, (num_states - 1) * num_items).reshape( num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps,", "*interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar: if name == \"central_longitude\": interp =", "-int(np.ceil(num_steps / num_states * 2)) if num_states > 2: result = np.roll(result, num_roll,", "num_items ) result = conversion(result.ravel()).values result = result.reshape(num_items, -1) return result def _interp_numeric(", "np.power(2, (20 * ts[index0]) - 10) ts[index1] = -0.5 * np.power(2, (-20 *", ") result = conversion(result.ravel()).values result = result.reshape(num_items, -1) return result def 
_interp_numeric( self,", "ts[~index] - 1)) + 2 ) return ts def _back(self, ts, ease): if", "return ts def _back(self, ts, ease): if ease == \"in\": ts = ts", "* np.pi) ) ) + 0.5 ) return ts def _bounce(self, ts, ease):", "2)) elif ease == \"in_out\": index = ts < 0.5 ts[index] = 2", "num_states == 1) and self.revert is None: return da_origin steps = np.linspace(0, 1,", "/ 2 * ts) * np.power(2, 10 * (ts - 1)) elif ease", "0.5 * ( 1 - ( ts[~index] * ts[~index] * ts[~index] - ts[~index]", "* ts[index2]) - (35442 / 1805.0 * ts[index2]) + 16061 / 1805.0 )", "method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\",", "_linear(self, ts, ease): return ts def _quadratic(self, ts, ease): if ease == \"in\":", "ts def _cubic(self, ts, ease): if ease == \"in\": ts = ts *", "ease == \"in_out\": index = ts < 0.5 ts[index] = 8 * ts[index]", "< 10: num_steps = int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100 / num_states))", "tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape( num_items, -1 ) weights =", "result[:, indices] = array # (1, num_states) return result def _interp_fill(self, array, num_states,", "result, name): if result.ndim == 1: result_back = result[::-1] else: result_back = result[:,", "da_origin ( da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da)", "2 * (ts + 1)) * np.power(2, -10 * ts) + 1 elif", "if ease == \"in\": ts = np.sin((ts - 1) * np.pi / 2)", "return array def _calc_shapes(self, array): num_items, num_states = array.shape if self.frames is None:", "num_items, num_states, num_steps, num_result def _apply_revert(self, result, name): if result.ndim == 1: result_back", "/ 25.0 ) if ease == \"in\": ts = 1 - ts elif", "interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\" in", "= np.roll(result, num_roll, axis=-1) result = result[:, :num_result] 
else: half_way = int(num_result /", "if array.ndim == 1: array = array[np.newaxis, :] if self.revert == \"boomerang\": array", "2 * ts) * np.power(2, 10 * (ts - 1)) elif ease ==", "& ~index0 index2 = (ts < 9 / 10) & ~index1 & ~index0", "!= 0) & (ts >= 0.5) & (ts != 1) ts[index0] = 0.5", "results = [] for colors in array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\",", "== \"in\": ts = ts * ts * ts - ts * np.sin(ts", "\"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result = self._interp_first( array, num_states, num_steps, num_items,", "= result[:, ::-1] if name == \"duration\" and self.revert == \"rollback\": result_back =", "- (513 / 25.0 * ts[index3]) + 268 / 25.0 ) if ease", "num_steps, num_items ): init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans]", "/ 11 index1 = (ts < 8 / 11) & ~index0 index2 =", "name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states, num_steps, name)", "* ts[~index] * ts[~index] + 1 return ts def _quintic(self, ts, ease): if", "from .util import is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method;", "/ 10) & ~index1 & ~index0 index3 = ts >= 9 / 10", "= result.reshape(num_items, -1) return result def _interp_numeric( self, array, steps, interp, ease, num_states,", "self, array, conversion, steps, interp, ease, num_states, num_steps, num_items ): array = array.astype(float)", "result = result[:, half_way:-half_way] if num_steps % 2 != 0: result = result[:,", "return result def _interp_fill(self, array, num_states, num_steps, name): indices = np.arange(num_states * num_steps", "super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp = self.interp or \"cubic\" ease = self.ease", "/ 10 ts[index0] = 121 * ts[index0] * ts[index0] / 16 ts[index1] =", "np.datetime64): result = 
self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, pd.to_timedelta,", "ts = ts * ts * ts - ts * np.sin(ts * np.pi)", "ts < 4 / 11 index1 = (ts < 8 / 11) &", "ts * ts - ts * np.sin(ts * np.pi)) elif ease == \"in_out\":", "ts[~index] = 0.5 * ( np.sqrt(-((2 * ts[~index]) - 3) * ((2 *", "- 1)) + 2 ) return ts def _back(self, ts, ease): if ease", "0.5 * ( ts[index] * ts[index] * ts[index] - ts[index] * np.sin(ts[index] *", "* (1 - weights) result[init_nans | stop_nans] = np.nan # replace nans return", ":num_result] else: half_way = int(num_result / 2) result = result[:, half_way:-half_way] if num_steps", "ts) elif ease == \"in_out\": index = ts < 0.5 ts[index] = 0.5", "than (item, state) if \"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]})", "int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100 / num_states)) else: num_steps = self.frames", "= ts < 0.5 ts[index] = 2 * ts[index] ts[index] = 0.5 *", "= LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result = np.array(results) return", "= self.frames with param.edit_constant(self): self.num_steps = num_steps num_result = (num_states - 1) *", "ts * ts elif ease == \"out\": ts = (ts - 1) *", ") frames = param.Integer( default=None, bounds=(1, None), doc=\"Number of frames between each base", "np.roll(result, num_roll, axis=-1) result = result[:, :num_result] else: half_way = int(num_result / 2)", "ts def _sine(self, ts, ease): if ease == \"in\": ts = np.sin((ts -", "* ts[~index] - 2 ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index]", "/ 40.0 * ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1]) +", "N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result = np.array(results) return result def _interp_text(self,", "1 ts[~index] = -8 * ts[~index] * ts[~index] * 
ts[~index] * ts[~index] +", "ts < 0.5 ts[index] = 0.5 * (1 - np.sqrt(1 - 4 *", "( np.sin(-13 * np.pi / 2 * ((2 * ts[~index] - 1) +", "f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights + init * (1 - weights)", "np.pi) ) ) + 0.5 ) return ts def _bounce(self, ts, ease): index", "\"out\": ts = -(ts * (ts - 2)) elif ease == \"in_out\": index", "array_dtype = array.dtype if name in [\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph:", "result_back = result[:, ::-1] if name == \"duration\" and self.revert == \"rollback\": result_back", "/ 1805.0 ) ts[index3] = ( (54 / 5.0 * ts[index3] * ts[index3])", "ts[~index] = 0.5 * ( np.sin(-13 * np.pi / 2 * ((2 *", "\"rollback\"]: result = self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result, da, dims, coords)", "ts = ts * ts * ts elif ease == \"out\": ts =", "* ts[~index] - 1) ts[~index] = ( 0.5 * ( 1 - (", "- 1) ts[~index] = ( 0.5 * ( 1 - ( ts[~index] *", "ts[index] = 1 - np.power(2, -10 * ts[index]) elif ease == \"in_out\": index0", "if result.ndim == 1: result_back = result[::-1] else: result_back = result[:, ::-1] if", "else: # str result = self._interp_text(array, num_states, num_steps, num_result) if self.revert in [\"traceback\",", "= ( (54 / 5.0 * ts[index3] * ts[index3]) - (513 / 25.0", "if ease == \"in\": ts = 1 - ts elif ease == \"in_out\":", "return ts def _exponential(self, ts, ease): if ease == \"in\": index = ts", "array.shape if self.frames is None: if num_states < 10: num_steps = int(np.ceil(60 /", "return ts def _quadratic(self, ts, ease): if ease == \"in\": ts = ts", "(2 * ts[~index] - 1)) + 2 ) return ts def _back(self, ts,", "result[:, :-1] return result def _interp_time( self, array, conversion, steps, interp, ease, num_states,", "4 * (ts[index] * ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2 * ts[~index])", "* ts[~index]) - 3) * ((2 * ts[~index]) - 1)) + 1 )", "- ( ts[~index] * ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index] * 
np.pi)", "-10 * ts[index]) elif ease == \"in_out\": index0 = (ts != 0) &", "* 2 - 1 index0 = ts < 4 / 11 index1 =", "1)) elif ease == \"out\": index = ts != 1 ts[~index] = 1", "_apply_revert(self, result, name): if result.ndim == 1: result_back = result[::-1] else: result_back =", "and self.revert == \"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result =", "0.5 * ( np.sin(-13 * np.pi / 2 * ((2 * ts[~index] -", ") return ts def _exponential(self, ts, ease): if ease == \"in\": index =", "= 4 * ts[index] * ts[index] * ts[index] ts[~index] = 2 * ts[~index]", "= da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph def _prep_array(self,", "/ 16 ts[index1] = ( (363 / 40.0 * ts[index1] * ts[index1]) -", "in item_dim: if \"batch\" in da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...) else:", "* ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = ts[~index] - 1", "== \"out\": ts = np.sin(-13 * np.pi / 2 * (ts + 1))", "_circular(self, ts, ease): if ease == \"in\": ts = 1 - np.sqrt(1 -", "return result def _interp_numeric( self, array, steps, interp, ease, num_states, num_steps, num_items ):", "ts = 1 - (ts * ts * ts - ts * np.sin(ts", "= array # (1, num_states) return result def _interp_fill(self, array, num_states, num_steps, name):", "axis=-1) result = result[:, :num_result] else: half_way = int(num_result / 2) result =", "= ( 0.5 * ( 1 - ( ts[~index] * ts[~index] * ts[~index]", "pandas as pd import param import xarray as xr from matplotlib.colors import LinearSegmentedColormap,", "array[np.newaxis, :] if self.revert == \"boomerang\": array = np.hstack([array, array[:, :1]]) return array", "1 elif ease == \"out\": ts = np.sin(ts * np.pi / 2) elif", "ts[~index] = -8 * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1", "ease): if ease == \"in\": ts = np.sin((ts - 1) * np.pi /", "_quartic(self, ts, ease): if ease == \"in\": ts = ts * ts *", "num_states, 
num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if", "num_steps num_result = (num_states - 1) * num_steps return num_items, num_states, num_steps, num_result", "ts[index] * np.sin(ts[index] * np.pi) ) ts[~index] = 1 - (2 * ts[~index]", "result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else: result = result.values return", "ts[index] = 16 * ts[index] * ts[index] * ts[index] * ts[index] * ts[index]", ") if \"stacked\" in result.dims: result = result.unstack().transpose(*dims) return result def _interp_first(self, array,", "pd import param import xarray as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from", "( ts[index] * ts[index] * ts[index] - ts[index] * np.sin(ts[index] * np.pi) )", "= param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer(", "* (ts - 1) * (1 - ts) + 1 elif ease ==", "# more than (item, state) if \"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\",", "- 1)) elif ease == \"out\": ts = np.sin(-13 * np.pi / 2", "ts def _quintic(self, ts, ease): if ease == \"in\": ts = ts *", "= 8 * ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = ts[~index]", "array = np.hstack([array, array[:, :1]]) return array def _calc_shapes(self, array): num_items, num_states =", "ts[~index] + 1 return ts def _quintic(self, ts, ease): if ease == \"in\":", "+ 1 elif ease == \"out\": ts = np.sin(ts * np.pi / 2)", ":-1], num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] = 0 # temporarily fill the", "= np.tile(steps, (num_states - 1) * num_items).reshape( num_items, -1 ) weights = getattr(self,", "::-1] if name == \"duration\" and self.revert == \"rollback\": result_back = np.repeat(1 /", "doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1, None), doc=\"Number", "def 
_interp_first(self, array, num_states, num_steps, num_items, num_result, name): if is_str(array): fill = \"\"", "= ts * ts elif ease == \"out\": ts = -(ts * (ts", "\" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps", "(item, state) if \"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif", "num_states, num_steps, num_items ): init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans = np.isnan(init)", "frames between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method", "ts[~index]) + (4 * ts[~index]) - 1 return ts def _cubic(self, ts, ease):", "\"in_out\": ts[index] = 1 - (ts[index] * 2) ts[~index] = ts[~index] * 2", "if \"batch\" in da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...) else: da =", "if ease == \"in\": index = ts != 0 ts[~index] = 0 ts[index]", "result[:, half_way:-half_way] if num_steps % 2 != 0: result = result[:, :-1] return", "= (steps, interp, ease, num_states, num_steps, num_items) array_dtype = array.dtype if name in", "result = np.hstack([result, result_back]) return result def _rebuild_da(self, result, da, dims, coords): if", "pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, num_steps), ) .T.reindex(indices) .T ) if not", "return result def _linear(self, ts, ease): return ts def _quadratic(self, ts, ease): if", "def _interp_fill(self, array, num_states, num_steps, name): indices = np.arange(num_states * num_steps - num_steps)", "ts < 0.5 ts[index] = 16 * ts[index] * ts[index] * ts[index] *", "ts[~index] * ts[~index] + 1 ) return ts def _sine(self, ts, ease): if", "ts[index2] * ts[index2]) - (35442 / 1805.0 * ts[index2]) + 16061 / 1805.0", "\"bar_label\") ): result = self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result =", 
"array[:, :1]]) return array def _calc_shapes(self, array): num_items, num_states = array.shape if self.frames", "ts[index0]) - 10) ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) + 10)", "*interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and", "* ts * ts * ts * ts elif ease == \"out\": ts", "in da.dims: if \"item\" in item_dim: if \"batch\" in da.dims: da = da.transpose(item_dim,", "= ts * ts * ts - ts * np.sin(ts * np.pi) elif", "Iterable import numpy as np import pandas as pd import param import xarray", "* ts[index1]) + 17 / 5.0 ) ts[index2] = ( (4356 / 361.0", "( (4356 / 361.0 * ts[index2] * ts[index2]) - (35442 / 1805.0 *", "np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar: if", "* ts[index] * ts[index] ts[~index] = 2 * ts[~index] - 2 ts[~index] =", "# temporarily fill the nans stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans =", "the shortest path to the initial state, \" \"traceback backtracks the original path", "as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES, INTERPS,", "result def _rebuild_da(self, result, da, dims, coords): if len(dims) == 1: result =", "= (ts - 1) * (ts - 1) * (ts - 1) *", "ease == \"in_out\": ts = 0.5 * (1 - np.cos(ts * np.pi)) return", "interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result =", "0.5 * ts[~index] * ts[~index] * ts[~index] + 1 return ts def _quartic(self,", ") return ts def _back(self, ts, ease): if ease == \"in\": ts =", "elif ease == \"in_out\": ts = 0.5 * (1 - np.cos(ts * np.pi))", "result = self._interp_first( array, num_states, num_steps, num_items, num_result, name ) elif interp ==", "for reverting to the initial state; \" \"boomerang finds the shortest 
path to", "\"in\": index = ts != 0 ts[~index] = 0 ts[index] = np.power(2, 10", "interp, ease, num_states, num_steps, num_items) array_dtype = array.dtype if name in [\"duration\", \"remark\",", ") ts[index2] = ( (4356 / 361.0 * ts[index2] * ts[index2]) - (35442", "ts = 1 - ts elif ease == \"in_out\": ts[index] = 1 -", "= param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\",", "- (ts * ts * ts - ts * np.sin(ts * np.pi)) elif", "0) & (ts >= 0.5) & (ts != 1) ts[index0] = 0.5 *", "== \"duration\" and self.revert == \"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :]", "elif interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result", "* ts[index] * ts[index] * ts[index] ts[~index] = ts[~index] - 1 ts[~index] =", "= ts < 0.5 ts[index] = 2 * ts[index] * ts[index] ts[~index] =", "dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1] -= 1 result[:, indices] = array", "4 / 11 index1 = (ts < 8 / 11) & ~index0 index2", "10.0 * ts[index1]) + 17 / 5.0 ) ts[index2] = ( (4356 /", "\"in\": ts = 1 - ts elif ease == \"out\": pass elif ease", "ts = np.sin(13 * np.pi / 2 * ts) * np.power(2, 10 *", "- 2 ts[~index] = ( 0.5 * ts[~index] * ts[~index] * ts[~index] *", "* ts)) elif ease == \"out\": ts = np.sqrt((2 - ts) * ts)", "precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1, None), doc=\"Number of frames between each", "ease, num_states, num_steps, num_items) array_dtype = array.dtype if name in [\"duration\", \"remark\", \"xerr\",", "ease == \"out\": pass elif ease == \"in_out\": ts[index] = 0.5 * (1", "index = ts != 0 ts[~index] = 0 ts[index] = np.power(2, 10 *", "* np.power(2, 10 * ((2 * ts[index]) - 1)) ) ts[~index] = 0.5", "ease): if ease == \"in\": ts = np.sin(13 * np.pi / 2 *", "return result def 
_interp_first(self, array, num_states, num_steps, num_items, num_result, name): if is_str(array): fill", "elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result =", "\"out\": index = ts != 1 ts[~index] = 1 ts[index] = 1 -", "interp = \"linear\" result = self._interp_numeric(array, *interp_args) elif name in \"c\": # must", "result def _interp_fill(self, array, num_states, num_steps, name): indices = np.arange(num_states * num_steps -", "0) & (ts < 0.5) & (ts != 1) index1 = (ts !=", "class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1, None),", "num_items, num_result, name): if is_str(array): fill = \"\" dtype = np.object else: fill", "ts def _back(self, ts, ease): if ease == \"in\": ts = ts *", "elif ease == \"in_out\": index = ts < 0.5 ts[index] = ( 0.5", "np.power(2, -10 * (2 * ts[~index] - 1)) + 2 ) return ts", ") + 0.5 ) return ts def _bounce(self, ts, ease): index = ts", "\"\" dtype = np.object else: fill = 0.0 dtype = None result =", "0.5 if ease == \"in\": ts = 1 - ts elif ease ==", "* ts[index0] * ts[index0] / 16 ts[index1] = ( (363 / 40.0 *", "result = self._interp_fill(array, num_states, num_steps, name) else: # str result = self._interp_text(array, num_states,", "ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index] + 1 return ts", "num_states > 2: result = np.roll(result, num_roll, axis=-1) result = result[:, :num_result] else:", "1) ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) - 10) ts[index1] =", "np import pandas as pd import param import xarray as xr from matplotlib.colors", "= ts < 4 / 11 index1 = (ts < 8 / 11)", "ts[index0] / 16 ts[index1] = ( (363 / 40.0 * ts[index1] * ts[index1])", "\"boomerang\": array = np.hstack([array, array[:, :1]]) return array def _calc_shapes(self, array): num_items, num_states", "* num_steps - num_steps) result = ( 
pd.DataFrame( array, columns=np.arange(0, num_states * num_steps,", "ts[~index] * np.sin(ts[~index] * np.pi) ) ) + 0.5 ) return ts def", "ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1]) + 17 / 5.0", "attrs=da.attrs, ) if \"stacked\" in result.dims: result = result.unstack().transpose(*dims) return result def _interp_first(self,", "result def _interp_numeric( self, array, steps, interp, ease, num_states, num_steps, num_items ): init", "== \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array,", "= ts[~index] - 1 ts[~index] = -8 * ts[~index] * ts[~index] * ts[~index]", "= 0.5 * ts[~index] * ts[~index] * ts[~index] + 1 return ts def", "self.revert is None: return da_origin steps = np.linspace(0, 1, num_steps) interp_args = (steps,", "( 0.5 * np.sin(13 * np.pi / 2 * (2 * ts[index])) *", "default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1,", "if ease == \"in\": ts = 1 - np.sqrt(1 - (ts * ts))", "( 0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index] +", "= self._interp_text(array, num_states, num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result,", "index1 = (ts != 0) & (ts >= 0.5) & (ts != 1)", "num_steps = self.frames with param.edit_constant(self): self.num_steps = num_steps num_result = (num_states - 1)", "= 0.5 * ( np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index])", "elif ease == \"in_out\": index = ts < 0.5 ts[index] = 8 *", "ts[~index] + 1 ) return ts def _sine(self, ts, ease): if ease ==", "array = np.array(da) if array.ndim == 1: array = array[np.newaxis, :] if self.revert", "+ 16061 / 1805.0 ) ts[index3] = ( (54 / 5.0 * ts[index3]", "else: num_steps = int(np.ceil(100 / num_states)) else: num_steps = self.frames with param.edit_constant(self): self.num_steps", "* (1 - ts[index]) ts[~index] = 0.5 * 
ts[~index] + 0.5 return ts", "/ 1805.0 * ts[index2]) + 16061 / 1805.0 ) ts[index3] = ( (54", "* (1 - np.cos(ts * np.pi)) return ts def _circular(self, ts, ease): if", "da, dims, coords) return result def _prep_xarray(self, da): name = da.name interp =", "/ num_states)) else: num_steps = int(np.ceil(100 / num_states)) else: num_steps = self.frames with", "# item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))])", "da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da = da.stack({\"stacked\":", "if name == \"central_longitude\": interp = \"linear\" result = self._interp_numeric(array, *interp_args) elif name", "if (num_steps == 1 or num_states == 1) and self.revert is None: return", "== \"in_out\": ts[index] = 0.5 * (1 - ts[index]) ts[~index] = 0.5 *", "doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing;", "indices[-1] -= 1 result[:, indices] = array # (1, num_states) return result def", "ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of easing; {EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames =", "[\"duration\", \"remark\", \"xerr\", \"yerr\"] and not is_errorbar_morph: result = self._interp_first( array, num_states, num_steps,", "- 1) * (ts - 1) * (ts - 1) * (1 -", "= getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights + init * (1", "name, dims, coords, interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da)", "doc=\"Method for reverting to the initial state; \" \"boomerang finds the shortest path", "ts[index] = 0.5 * (1 - np.sqrt(1 - 4 * (ts[index] * ts[index])))", "ts = np.sin((ts - 1) * np.pi / 2) + 1 elif ease", "* np.sin(ts[index] * np.pi) ) ts[~index] = 1 - (2 * ts[~index] -", "result def 
_interp_time( self, array, conversion, steps, interp, ease, num_states, num_steps, num_items ):", "(1 - weights) result[init_nans | stop_nans] = np.nan # replace nans return result", "/ num_states * 2)) if num_states > 2: result = np.roll(result, num_roll, axis=-1)", "( 1 - ( ts[~index] * ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index]", "_interp_text(self, array, num_states, num_steps, num_result): result = np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps", "ts[index] ts[~index] = 2 * ts[~index] - 2 ts[~index] = 0.5 * ts[~index]", "REVERTS from .util import is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation", "da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph def _prep_array(self, da):", "ts[~index] * ts[~index] + 1 return ts def _quintic(self, ts, ease): if ease", "(ts[index] - 1)) elif ease == \"out\": index = ts != 1 ts[~index]", "elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not", "np.arange(num_states) * num_steps indices[-1] -= 1 result[:, indices] = array # (1, num_states)", "(num_states - 1) * num_items).reshape( num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease)", "(1 - ts) + 1 elif ease == \"in_out\": index = ts <", "= np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states * 2)) if num_states", "nans stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps,", "= ts * ts * ts * ts * ts elif ease ==", "ts[index1]) - (99 / 10.0 * ts[index1]) + 17 / 5.0 ) ts[index2]", "num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames between", "array.astype(float) result = self._interp_numeric( array, steps, interp, 
ease, num_states, num_steps, num_items ) result", "ts) + 1 elif ease == \"in_out\": index = ts < 0.5 ts[index]", "0.5 ts[index] = 8 * ts[index] * ts[index] * ts[index] * ts[index] ts[~index]", "import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util", "elif ease == \"in_out\": ts[index] = 0.5 * (1 - ts[index]) ts[~index] =", "def _linear(self, ts, ease): return ts def _quadratic(self, ts, ease): if ease ==", "1 result[:, indices] = array # (1, num_states) return result def _interp_fill(self, array,", "* np.pi)) elif ease == \"in_out\": index = ts < 0.5 ts[index] =", "ease == \"in\": ts = np.sin((ts - 1) * np.pi / 2) +", "- (ts * ts)) elif ease == \"out\": ts = np.sqrt((2 - ts)", "2 ) return ts def _back(self, ts, ease): if ease == \"in\": ts", "if \"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in", "in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords =", "np.array(results) return result def _interp_text(self, array, num_states, num_steps, num_result): result = np.repeat(array, num_steps,", "== \"in_out\": ts[index] = 1 - (ts[index] * 2) ts[~index] = ts[~index] *", "ts[~index] * ts[~index] * ts[~index] + 1 return ts def _quartic(self, ts, ease):", "axis=-1) num_roll = -int(np.ceil(num_steps / num_states * 2)) if num_states > 2: result", "num_states, num_steps, num_items) array_dtype = array.dtype if name in [\"duration\", \"remark\", \"xerr\", \"yerr\"]", "interpolate(self, da, name=\"\"): interp = self.interp or \"cubic\" ease = self.ease da_origin =", "ease, num_states, num_steps, num_items ): array = array.astype(float) result = self._interp_numeric( array, steps,", "half_way:-half_way] if num_steps % 2 != 0: result = result[:, :-1] return result", "+ 1 ) return ts def _exponential(self, ts, ease): if ease == \"in\":", "* ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 ) 
return ts", "np.isnan(init) init[init_nans] = 0 # temporarily fill the nans stop = np.repeat(array[:, 1:],", "= np.sin(ts * np.pi / 2) elif ease == \"in_out\": ts = 0.5", "/ 2) elif ease == \"in_out\": ts = 0.5 * (1 - np.cos(ts", "num_result def _apply_revert(self, result, name): if result.ndim == 1: result_back = result[::-1] else:", "ts[index] * ts[index] ts[~index] = 2 * ts[~index] - 2 ts[~index] = 0.5", "= ts < 0.5 ts[index] = 0.5 * (1 - np.sqrt(1 - 4", "\"duration\" and self.revert == \"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result", "= result[:, :-1] return result def _interp_time( self, array, conversion, steps, interp, ease,", "= ts < 0.5 ts[index] = 16 * ts[index] * ts[index] * ts[index]", "* np.sin(ts * np.pi) elif ease == \"out\": ts = 1 - ts", "getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights + init * (1 -", "== \"in\": ts = 1 - ts elif ease == \"in_out\": ts[index] =", "np.sin(ts * np.pi) elif ease == \"out\": ts = 1 - ts ts", "ts[~index] - 1) ts[~index] = ( 0.5 * ( 1 - ( ts[~index]", "-1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights +", "ts[~index] * 2 - 1 index0 = ts < 4 / 11 index1", "result_back = result[::-1] else: result_back = result[:, ::-1] if name == \"duration\" and", "array, num_states, num_steps, num_items, num_result, name): if is_str(array): fill = \"\" dtype =", "ts[~index] - 1) + 1)) * np.power(2, -10 * (2 * ts[~index] -", "np.pi) ) ts[~index] = 1 - (2 * ts[~index] - 1) ts[~index] =", "da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return", "\"out\": ts = 1 - ts ts = 1 - (ts * ts", "ts def _bounce(self, ts, ease): index = ts < 0.5 if ease ==", "= 1 - (ts[index] * 2) ts[~index] = ts[~index] * 2 - 1", "\"xerr\", \"yerr\"] and not is_errorbar_morph: result = self._interp_first( 
array, num_states, num_steps, num_items, num_result,", "but disregards the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of", "return ts def _quintic(self, ts, ease): if ease == \"in\": ts = ts", "array, num_states, num_steps, name): indices = np.arange(num_states * num_steps - num_steps) result =", "ts, ease): if ease == \"in\": index = ts != 0 ts[~index] =", "= array.astype(float) result = self._interp_numeric( array, steps, interp, ease, num_states, num_steps, num_items )", "(4356 / 361.0 * ts[index2] * ts[index2]) - (35442 / 1805.0 * ts[index2])", "init_nans = np.isnan(init) init[init_nans] = 0 # temporarily fill the nans stop =", "ease == \"in\": ts = ts * ts * ts * ts *", "* (ts + 1)) * np.power(2, -10 * ts) + 1 elif ease", "return ts def _elastic(self, ts, ease): if ease == \"in\": ts = np.sin(13", "= ( 0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index]", "== 1: result_back = result[::-1] else: result_back = result[:, ::-1] if name ==", "np.sin(ts * np.pi)) elif ease == \"in_out\": index = ts < 0.5 ts[index]", "ts = 0.5 * (1 - np.cos(ts * np.pi)) return ts def _circular(self,", "name) if is_xarray: result = self._rebuild_da(result, da, dims, coords) return result def _prep_xarray(self,", "num_states = array.shape if self.frames is None: if num_states < 10: num_steps =", "the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"])", "* (ts - 2)) elif ease == \"in_out\": index = ts < 0.5", "* np.pi / 2 * (ts + 1)) * np.power(2, -10 * ts)", "return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph def _prep_array(self, da): array", "= np.isnan(stop) tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape( num_items, -1 )", "if ease == \"in\": ts = ts * ts * ts elif ease", "ts = ts * ts * ts * ts elif ease == \"out\":", "num_steps, num_result def 
_apply_revert(self, result, name): if result.ndim == 1: result_back = result[::-1]", "stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states", "- num_steps) result = ( pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, num_steps), )", "= 0.5 * (1 - np.sqrt(1 - 4 * (ts[index] * ts[index]))) ts[~index]", "/ 11) & ~index0 index2 = (ts < 9 / 10) & ~index1", "ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps, num_result", "( np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index]) - 1)) +", "Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease =", "index = ts != 1 ts[~index] = 1 ts[index] = 1 - np.power(2,", "_bounce(self, ts, ease): index = ts < 0.5 if ease == \"in\": ts", "-1) return result def _interp_numeric( self, array, steps, interp, ease, num_states, num_steps, num_items", "+ (4 * ts[~index]) - 1 return ts def _cubic(self, ts, ease): if", "dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims: result = result.unstack().transpose(*dims) return", "dims, coords, interp, ease, is_bar, is_errorbar_morph def _prep_array(self, da): array = np.array(da) if", "for rgb in cmap(np.arange(num_result))]) result = np.array(results) return result def _interp_text(self, array, num_states,", "ts = np.sin(-13 * np.pi / 2 * (ts + 1)) * np.power(2,", "17 / 5.0 ) ts[index2] = ( (4356 / 361.0 * ts[index2] *", "_rebuild_da(self, result, da, dims, coords): if len(dims) == 1: result = result.squeeze() result", "num_steps) result = ( pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, num_steps), ) .T.reindex(indices)", "= np.power(2, 10 * (ts[index] - 1)) elif ease == \"out\": index =", "ts[~index] + 1 return ts def _quartic(self, ts, ease): if ease == \"in\":", "ts, ease): 
if ease == \"in\": ts = 1 - np.sqrt(1 - (ts", "ease == \"in\": ts = ts * ts * ts * ts elif", "[\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da", "ts * ts elif ease == \"out\": ts = -(ts * (ts -", "= self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args) elif", "(ts * ts)) elif ease == \"out\": ts = np.sqrt((2 - ts) *", "* ts[index1]) - (99 / 10.0 * ts[index1]) + 17 / 5.0 )", "* ts[index] ts[index] = 0.5 * ( ts[index] * ts[index] * ts[index] -", "\"in\": ts = 1 - np.sqrt(1 - (ts * ts)) elif ease ==", "ease == \"in_out\": ts[index] = 0.5 * (1 - ts[index]) ts[~index] = 0.5", "== \"in\": index = ts != 0 ts[~index] = 0 ts[index] = np.power(2,", "state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to the", "(363 / 40.0 * ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1])", "- ts ts = 1 - (ts * ts * ts - ts", "self._interp_fill(array, num_states, num_steps, name) else: # str result = self._interp_text(array, num_states, num_steps, num_result)", "1) + 1 elif ease == \"in_out\": index = ts < 0.5 ts[index]", "precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to the initial", "\"in\": ts = 1 - ts elif ease == \"in_out\": ts[index] = 1", "* ((2 * ts[index]) - 1)) ) ts[~index] = 0.5 * ( np.sin(-13", "- 2)) elif ease == \"in_out\": index = ts < 0.5 ts[index] =", "return da_origin steps = np.linspace(0, 1, num_steps) interp_args = (steps, interp, ease, num_states,", "be after number result = self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array, num_states,", "self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, 
pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype,", "_calc_shapes(self, array): num_items, num_states = array.shape if self.frames is None: if num_states <", "\"out\": ts = np.sin(-13 * np.pi / 2 * (ts + 1)) *", "dims, coords) return result def _prep_xarray(self, da): name = da.name interp = da.attrs.get(\"interp\")", "\" \"rollback is like traceback, but disregards the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"],", "num_roll = -int(np.ceil(num_steps / num_states * 2)) if num_states > 2: result =", "( da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array", "= ts * ts * ts elif ease == \"out\": ts = (ts", "index = ts < 0.5 ts[index] = 0.5 * (1 - np.sqrt(1 -", "da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph def _prep_array(self, da): array =", "== \"boomerang\": array = np.hstack([array, array[:, :1]]) return array def _calc_shapes(self, array): num_items,", "np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1] -= 1 result[:,", "ts = np.sin(ts * np.pi / 2) elif ease == \"in_out\": ts =", "else: result = result.values return result def _interp_color(self, array, num_result): results = []", "ts[~index] = (2 * ts[~index]) - 2 ts[~index] = ( 0.5 * ts[~index]", "name): indices = np.arange(num_states * num_steps - num_steps) result = ( pd.DataFrame( array,", "* ts[~index] + 1 return ts def _quintic(self, ts, ease): if ease ==", "da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar =", "dtype = None result = np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states) *", "xarray as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES,", "if num_steps % 2 != 0: result = result[:, :-1] return result def", "np.isnan(stop) tiled_steps = 
np.tile(steps, (num_states - 1) * num_items).reshape( num_items, -1 ) weights", "num_result, name): if is_str(array): fill = \"\" dtype = np.object else: fill =", "as pd import param import xarray as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex", "): array = array.astype(float) result = self._interp_numeric( array, steps, interp, ease, num_states, num_steps,", "index = ts < 0.5 ts[index] = 8 * ts[index] * ts[index] *", "ease): if ease == \"in\": ts = ts * ts * ts -", "- 1)) ) ts[~index] = 0.5 * ( np.sin(-13 * np.pi / 2", "\"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states,", "**DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp = self.interp", "= param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to the initial state; \" \"boomerang", "of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames between each base state\",", "each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"):", "== \"out\": pass elif ease == \"in_out\": ts[index] = 0.5 * (1 -", "1) * (ts - 1) + 1 elif ease == \"in_out\": index =", "\"state\", ...) 
break dims = da.dims if da.ndim > 2: # more than", "self._rebuild_da(result, da, dims, coords) return result def _prep_xarray(self, da): name = da.name interp", "num_result, name ) elif interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\",", "num_states, num_steps, num_items ) result = conversion(result.ravel()).values result = result.reshape(num_items, -1) return result", "fill = \"\" dtype = np.object else: fill = 0.0 dtype = None", "* ts * ts - ts * np.sin(ts * np.pi) elif ease ==", "ts[index] = ( 0.5 * np.sin(13 * np.pi / 2 * (2 *", "- (ts[index] * 2) ts[~index] = ts[~index] * 2 - 1 index0 =", "between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for", "0.5) & (ts != 1) index1 = (ts != 0) & (ts >=", "* ts[index2] * ts[index2]) - (35442 / 1805.0 * ts[index2]) + 16061 /", "elif ease == \"in_out\": ts[index] = 1 - (ts[index] * 2) ts[~index] =", "9 / 10 ts[index0] = 121 * ts[index0] * ts[index0] / 16 ts[index1]", "pass elif ease == \"in_out\": ts[index] = 0.5 * (1 - ts[index]) ts[~index]", "in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da", "10) ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) + 10) + 1", "- 1) * (ts - 1) * (ts - 1) + 1 elif", "each base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting", "\"out\": ts = np.sin(ts * np.pi / 2) elif ease == \"in_out\": ts", "(35442 / 1805.0 * ts[index2]) + 16061 / 1805.0 ) ts[index3] = (", "ts elif ease == \"in_out\": ts[index] = 1 - (ts[index] * 2) ts[~index]", "* ((2 * ts[~index]) - 1)) + 1 ) return ts def _exponential(self,", "da.dims: return da_origin ( da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph, )", "nans return result def _linear(self, ts, ease): return ts def _quadratic(self, ts, ease):", "np.pi)) elif ease == 
\"in_out\": index = ts < 0.5 ts[index] = 2", "\"item\" in item_dim: if \"batch\" in da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...)", ") if ease == \"in\": ts = 1 - ts elif ease ==", "ts * ts - ts * np.sin(ts * np.pi) elif ease == \"out\":", "% 2 != 0: result = result[:, :-1] return result def _interp_time( self,", ":1]]) return array def _calc_shapes(self, array): num_items, num_states = array.shape if self.frames is", "ts[index]) - 1)) ) ts[~index] = 0.5 * ( np.sin(-13 * np.pi /", "base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp", "is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array)", "1 or num_states == 1) and self.revert is None: return da_origin steps =", "+ 1 ) return ts def _sine(self, ts, ease): if ease == \"in\":", "param.edit_constant(self): self.num_steps = num_steps num_result = (num_states - 1) * num_steps return num_items,", "num_states, num_steps, num_items ): array = array.astype(float) result = self._interp_numeric( array, steps, interp,", "ts[index] = 8 * ts[index] * ts[index] * ts[index] * ts[index] ts[~index] =", "elif ease == \"in_out\": index0 = (ts != 0) & (ts < 0.5)", "result = result.squeeze() result = xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if", "2 - 1 index0 = ts < 4 / 11 index1 = (ts", "if \"stacked\" in result.dims: result = result.unstack().transpose(*dims) return result def _interp_first(self, array, num_states,", "ease == \"out\": ts = np.sin(-13 * np.pi / 2 * (ts +", "item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result", "10 * ((2 * ts[index]) - 1)) ) ts[~index] = 0.5 * (", "DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util import is_str class Easing(param.Parameterized): 
interp =", "ts[index] * ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = (2 *", "result = result.reshape(num_items, -1) return result def _interp_numeric( self, array, steps, interp, ease,", "= ts < 0.5 ts[index] = 8 * ts[index] * ts[index] * ts[index]", "default=None, bounds=(1, None), doc=\"Number of frames between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert", "result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims: result = result.unstack().transpose(*dims)", "self, array, steps, interp, ease, num_states, num_steps, num_items ): init = np.repeat(array[:, :-1],", "result = conversion(result.ravel()).values result = result.reshape(num_items, -1) return result def _interp_numeric( self, array,", "num_states) return result def _interp_fill(self, array, num_states, num_steps, name): indices = np.arange(num_states *", "\"linear\" result = self._interp_numeric(array, *interp_args) elif name in \"c\": # must be after", "(ts < 9 / 10) & ~index1 & ~index0 index3 = ts >=", ") ts[index3] = ( (54 / 5.0 * ts[index3] * ts[index3]) - (513", "= int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100 / num_states)) else: num_steps =", "self.frames with param.edit_constant(self): self.num_steps = num_steps num_result = (num_states - 1) * num_steps", "\"in_out\": ts = 0.5 * (1 - np.cos(ts * np.pi)) return ts def", "= np.sin(13 * np.pi / 2 * ts) * np.power(2, 10 * (ts", "self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps == 1 or num_states", "60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return result def _rebuild_da(self, result, da,", "(1 - np.cos(ts * np.pi)) return ts def _circular(self, ts, ease): if ease", "ts[~index]) - 1)) + 1 ) return ts def _exponential(self, ts, ease): if", "ease = da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\" in item_dim: if \"batch\"", "and self.revert is None: 
return da_origin steps = np.linspace(0, 1, num_steps) interp_args =", "+ 10) + 1 return ts def _elastic(self, ts, ease): if ease ==", "import pandas as pd import param import xarray as xr from matplotlib.colors import", "dims, coords): if len(dims) == 1: result = result.squeeze() result = xr.DataArray( result,", "= np.arange(num_states * num_steps - num_steps) result = ( pd.DataFrame( array, columns=np.arange(0, num_states", "np.sin(13 * np.pi / 2 * ts) * np.power(2, 10 * (ts -", "\"in\": ts = ts * ts * ts elif ease == \"out\": ts", "\"out\": ts = np.sqrt((2 - ts) * ts) elif ease == \"in_out\": index", "= -0.5 * np.power(2, (-20 * ts[index1]) + 10) + 1 return ts", "((2 * ts[~index] - 1) + 1)) * np.power(2, -10 * (2 *", "ts = ts * ts * ts * ts * ts elif ease", "return result def _prep_xarray(self, da): name = da.name interp = da.attrs.get(\"interp\") ease =", "0.0 dtype = None result = np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states)", "ease == \"in\": ts = 1 - ts elif ease == \"out\": pass", "np.object else: fill = 0.0 dtype = None result = np.full((num_items, num_result), fill,", "ease == \"in\": ts = ts * ts * ts - ts *", "* (2 * ts[index])) * np.power(2, 10 * ((2 * ts[index]) - 1))", "is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps, num_result =", "\"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph", "ts[index] * ts[index] ts[~index] = (-2 * ts[~index] * ts[~index]) + (4 *", "np.pi / 2 * (2 * ts[index])) * np.power(2, 10 * ((2 *", "result[::-1] else: result_back = result[:, ::-1] if name == \"duration\" and self.revert ==", "= np.sin(-13 * np.pi / 2 * (ts + 1)) * np.power(2, -10", "= 0.5 * (1 - np.cos(ts * np.pi)) return ts def _circular(self, ts,", "self._interp_first( array, num_states, num_steps, num_items, num_result, name ) elif interp == 
\"fill\" or", "_exponential(self, ts, ease): if ease == \"in\": index = ts != 0 ts[~index]", "/ 5.0 * ts[index3] * ts[index3]) - (513 / 25.0 * ts[index3]) +", "ease == \"out\": ts = (ts - 1) * (ts - 1) *", "ease): return ts def _quadratic(self, ts, ease): if ease == \"in\": ts =", "* ts[index] * ts[index] ts[~index] = ts[~index] - 1 ts[~index] = -8 *", "xr.DataArray) is_bar = False if is_xarray: if \"state\" not in da.dims: return da_origin", "1 return ts def _quartic(self, ts, ease): if ease == \"in\": ts =", "ts[index] * ts[index] - ts[index] * np.sin(ts[index] * np.pi) ) ts[~index] = 1", "= ts != 0 ts[~index] = 0 ts[index] = np.power(2, 10 * (ts[index]", "ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 ) return ts def", "elif ease == \"in_out\": index = ts < 0.5 ts[index] = 0.5 *", "\"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return", "num_steps, num_items ) result = conversion(result.ravel()).values result = result.reshape(num_items, -1) return result def", "ts elif ease == \"out\": pass elif ease == \"in_out\": ts[index] = 0.5", "0.5 ts[index] = 2 * ts[index] * ts[index] ts[~index] = (-2 * ts[~index]", "elif ease == \"out\": ts = (ts - 1) * (ts - 1)", "- ts) + 1 elif ease == \"in_out\": index = ts < 0.5", "< 0.5 ts[index] = 16 * ts[index] * ts[index] * ts[index] * ts[index]", "ts[index])) * np.power(2, 10 * ((2 * ts[index]) - 1)) ) ts[~index] =", "pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar: if name == \"central_longitude\": interp", "da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords,", "= ts >= 9 / 10 ts[index0] = 121 * ts[index0] * ts[index0]", "* (ts[index] - 1)) elif ease == \"out\": index = ts != 1", "1) * num_items).reshape( num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) 
result =", "= 1 - ts ts = 1 - (ts * ts * ts", "num_result) elif is_bar: result = self._interp_fill(array, num_states, num_steps, name) else: # str result", "result = result.unstack().transpose(*dims) return result def _interp_first(self, array, num_states, num_steps, num_items, num_result, name):", "ease, num_states, num_steps, num_items ): init = np.repeat(array[:, :-1], num_steps, axis=-1) init_nans =", "self._interp_numeric(array, *interp_args) elif name in \"c\": # must be after number result =", "10 ts[index0] = 121 * ts[index0] * ts[index0] / 16 ts[index1] = (", "ts < 0.5 ts[index] = 2 * ts[index] * ts[index] ts[~index] = (-2", "num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape(", "10 * (ts[index] - 1)) elif ease == \"out\": index = ts !=", "rgb2hex from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util import is_str", "to the initial state; \" \"boomerang finds the shortest path to the initial", "* ts[index] * ts[index] * ts[index] ts[~index] = 2 * ts[~index] - 2", "8 / 11) & ~index0 index2 = (ts < 9 / 10) &", "ease == \"in_out\": index0 = (ts != 0) & (ts < 0.5) &", "(54 / 5.0 * ts[index3] * ts[index3]) - (513 / 25.0 * ts[index3])", "\"in\": ts = np.sin((ts - 1) * np.pi / 2) + 1 elif", "1 return ts def _quintic(self, ts, ease): if ease == \"in\": ts =", "num_states < 10: num_steps = int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100 /", "* ts[index]) elif ease == \"in_out\": index0 = (ts != 0) & (ts", "ts = (ts - 1) * (ts - 1) * (ts - 1)", "== \"in\": ts = 1 - ts elif ease == \"out\": pass elif", "/ 2) result = result[:, half_way:-half_way] if num_steps % 2 != 0: result", "* ((2 * ts[~index] - 1) + 1)) * np.power(2, -10 * (2", "da = da.transpose(item_dim, \"batch\", \"state\", ...) else: da = da.transpose(item_dim, \"state\", ...) 
break", "def _apply_revert(self, result, name): if result.ndim == 1: result_back = result[::-1] else: result_back", "result.reshape(num_items, -1) return result def _interp_numeric( self, array, steps, interp, ease, num_states, num_steps,", "* np.power(2, -10 * ts) + 1 elif ease == \"in_out\": index =", "self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result, da, dims, coords) return result def", "* ts[~index] * ts[~index] * ts[~index] + 1 ) return ts def _sine(self,", "INTERPS, PRECEDENCES, REVERTS from .util import is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None,", "== \"out\": ts = (ts - 1) * (ts - 1) * (ts", "- (2 * ts[~index] - 1) ts[~index] = ( 0.5 * ( 1", "0 # temporarily fill the nans stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans", "da_origin steps = np.linspace(0, 1, num_steps) interp_args = (steps, interp, ease, num_states, num_steps,", "dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\" in dims: da =", "array def _calc_shapes(self, array): num_items, num_states = array.shape if self.frames is None: if", "if is_str(array): fill = \"\" dtype = np.object else: fill = 0.0 dtype", "* np.pi / 2 * ((2 * ts[~index] - 1) + 1)) *", "= (ts < 8 / 11) & ~index0 index2 = (ts < 9", "base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to", "1 - ts ts = 1 - (ts * ts * ts -", "\"rollback is like traceback, but disregards the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], )", "1805.0 * ts[index2]) + 16061 / 1805.0 ) ts[index3] = ( (54 /", "1 ts[index] = 1 - np.power(2, -10 * ts[index]) elif ease == \"in_out\":", "result = self._interp_numeric(array, *interp_args) elif name in \"c\": # must be after number", "1 - ts elif ease == \"in_out\": ts[index] = 1 - (ts[index] *", "== 1) and self.revert is None: return da_origin steps = np.linspace(0, 1, 
num_steps)", "((2 * ts[~index]) - 1)) + 1 ) return ts def _exponential(self, ts,", "cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result = np.array(results)", "result = self._interp_numeric( array, steps, interp, ease, num_states, num_steps, num_items ) result =", "ease): if ease == \"in\": ts = ts * ts * ts *", "- 2 ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index] + 1", "da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph, ) = self._prep_xarray(da) array =", "number result = self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array, num_states, num_steps, name)", "colors in array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for", "elif is_bar: result = self._interp_fill(array, num_states, num_steps, name) else: # str result =", "array # (1, num_states) return result def _interp_fill(self, array, num_states, num_steps, name): indices", "num_steps % 2 != 0: result = result[:, :-1] return result def _interp_time(", "ts * ts * ts elif ease == \"out\": ts = (ts -", "1 return ts def _cubic(self, ts, ease): if ease == \"in\": ts =", "np.sqrt(1 - 4 * (ts[index] * ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2", "-1] else: result = result.values return result def _interp_color(self, array, num_result): results =", "ts[index] * ts[index] * ts[index] - ts[index] * np.sin(ts[index] * np.pi) ) ts[~index]", "ease) result = stop * weights + init * (1 - weights) result[init_nans", "ts * np.sin(ts * np.pi)) elif ease == \"in_out\": index = ts <", "* ( 1 - ( ts[~index] * ts[~index] * ts[~index] - ts[~index] *", "ts[index3] = ( (54 / 5.0 * ts[index3] * ts[index3]) - (513 /", "param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames between each base", "16 ts[index1] = ( (363 / 40.0 * ts[index1] * ts[index1]) - 
(99", "= (ts != 0) & (ts >= 0.5) & (ts != 1) ts[index0]", "& (ts < 0.5) & (ts != 1) index1 = (ts != 0)", "num_steps, name): indices = np.arange(num_states * num_steps - num_steps) result = ( pd.DataFrame(", "1 return ts def _elastic(self, ts, ease): if ease == \"in\": ts =", "xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES,", "== \"in\": ts = 1 - np.sqrt(1 - (ts * ts)) elif ease", "ease == \"in_out\": index = ts < 0.5 ts[index] = ( 0.5 *", "def _exponential(self, ts, ease): if ease == \"in\": index = ts != 0", "* ts[index0] / 16 ts[index1] = ( (363 / 40.0 * ts[index1] *", "* ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = (2 * ts[~index])", "ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = ts[~index] - 1 ts[~index]", "= ts < 0.5 if ease == \"in\": ts = 1 - ts", "\"out\": ts = (ts - 1) * (ts - 1) * (ts -", "da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar = False if is_xarray: if \"state\" not", "ts, ease): if ease == \"in\": ts = ts * ts elif ease", ") return ts def _sine(self, ts, ease): if ease == \"in\": ts =", "def _circular(self, ts, ease): if ease == \"in\": ts = 1 - np.sqrt(1", "(num_states - 1) * num_steps return num_items, num_states, num_steps, num_result def _apply_revert(self, result,", "= None result = np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states) * num_steps", "result = self._interp_text(array, num_states, num_steps, num_result) if self.revert in [\"traceback\", \"rollback\"]: result =", "def _bounce(self, ts, ease): index = ts < 0.5 if ease == \"in\":", "- 1) * (ts - 1) * (ts - 1) * (ts -", "if len(dims) == 1: result = result.squeeze() result = xr.DataArray( result, dims=da.dims, coords=coords,", "1) index1 = (ts != 0) & (ts >= 0.5) & (ts !=", ") = self._prep_xarray(da) array = self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array) if", "np.hstack([array, array[:, 
:1]]) return array def _calc_shapes(self, array): num_items, num_states = array.shape if", "* num_steps return num_items, num_states, num_steps, num_result def _apply_revert(self, result, name): if result.ndim", "= np.object else: fill = 0.0 dtype = None result = np.full((num_items, num_result),", "* ts[~index] + 1 return ts def _quartic(self, ts, ease): if ease ==", "* ( np.sin(-13 * np.pi / 2 * ((2 * ts[~index] - 1)", "return num_items, num_states, num_steps, num_result def _apply_revert(self, result, name): if result.ndim == 1:", "- np.cos(ts * np.pi)) return ts def _circular(self, ts, ease): if ease ==", "- 4 * (ts[index] * ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2 *", "da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph =", "path to the initial state, \" \"traceback backtracks the original path to the", "* (ts - 1) + 1 elif ease == \"in_out\": index = ts", "ease == \"in\": index = ts != 0 ts[~index] = 0 ts[index] =", "/ 2) + 1 elif ease == \"out\": ts = np.sin(ts * np.pi", "num_steps = param.Integer( doc=\"Number of frames between each base state\", **DEFAULTS[\"num_kwds\"] ) def", "rgb in cmap(np.arange(num_result))]) result = np.array(results) return result def _interp_text(self, array, num_states, num_steps,", "def _cubic(self, ts, ease): if ease == \"in\": ts = ts * ts", "elif ease == \"out\": ts = -(ts * (ts - 2)) elif ease", "state) if \"grid_item\" in dims: da = da.stack({\"stacked\": [\"grid_item\", \"grid_y\", \"grid_x\"]}) elif \"batch\"", "(\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states, num_steps, name) elif", "ts[index] = 2 * ts[index] ts[index] = 0.5 * ( ts[index] * ts[index]", "- 1)) elif ease == \"out\": index = ts != 1 ts[~index] =", "1)) + 2 ) return ts def _back(self, ts, ease): if ease ==", "np.power(2, 10 * ((2 * ts[index]) - 1)) ) ts[~index] = 0.5 *", 
"ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2 * ts[~index]) - 3) * ((2", "+ 1 elif ease == \"in_out\": index = ts < 0.5 ts[index] =", "1 - ts elif ease == \"out\": pass elif ease == \"in_out\": ts[index]", "np.number) and not is_bar: if name == \"central_longitude\": interp = \"linear\" result =", "ts[index] ts[~index] = (-2 * ts[~index] * ts[~index]) + (4 * ts[~index]) -", "== \"out\": ts = 1 - ts ts = 1 - (ts *", "= result[:, half_way:-half_way] if num_steps % 2 != 0: result = result[:, :-1]", "- 1) * num_steps return num_items, num_states, num_steps, num_result def _apply_revert(self, result, name):", "steps = np.linspace(0, 1, num_steps) interp_args = (steps, interp, ease, num_states, num_steps, num_items)", ".T ) if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1]", "(ts != 1) index1 = (ts != 0) & (ts >= 0.5) &", "= np.nan # replace nans return result def _linear(self, ts, ease): return ts", "1) * (1 - ts) + 1 elif ease == \"in_out\": index =", "ts[index]) elif ease == \"in_out\": index0 = (ts != 0) & (ts <", "class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( default=\"in_out\", class_=Iterable, doc=f\"Type of", "0.5 * (1 - np.cos(ts * np.pi)) return ts def _circular(self, ts, ease):", "-0.5 * np.power(2, (-20 * ts[index1]) + 10) + 1 return ts def", "- weights) result[init_nans | stop_nans] = np.nan # replace nans return result def", "ts >= 9 / 10 ts[index0] = 121 * ts[index0] * ts[index0] /", "da.transpose(item_dim, \"state\", ...) 
break dims = da.dims if da.ndim > 2: # more", "0.5 ts[index] = 16 * ts[index] * ts[index] * ts[index] * ts[index] *", "* ts[index] - ts[index] * np.sin(ts[index] * np.pi) ) ts[~index] = 1 -", ") ts[~index] = 0.5 * ( np.sin(-13 * np.pi / 2 * ((2", "& (ts >= 0.5) & (ts != 1) ts[index0] = 0.5 * np.power(2,", "(ts[index] * 2) ts[~index] = ts[~index] * 2 - 1 index0 = ts", "self._calc_shapes(array) if (num_steps == 1 or num_states == 1) and self.revert is None:", "/ 25.0 * ts[index3]) + 268 / 25.0 ) if ease == \"in\":", "* np.power(2, (20 * ts[index0]) - 10) ts[index1] = -0.5 * np.power(2, (-20", "result = np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1] -=", "da = da.stack({\"stacked\": [item_dim, \"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords", "num_result) if self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if is_xarray: result", "* ts[index] * ts[index] ts[~index] = (2 * ts[~index]) - 2 ts[~index] =", "0 ts[~index] = 0 ts[index] = np.power(2, 10 * (ts[index] - 1)) elif", "len(dims) == 1: result = result.squeeze() result = xr.DataArray( result, dims=da.dims, coords=coords, name=da.name,", "* (1 - ts) + 1 elif ease == \"in_out\": index = ts", "1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states - 1) *", "* np.pi / 2 * ts) * np.power(2, 10 * (ts - 1))", "result[:, -1] = array[:, -1] else: result = result.values return result def _interp_color(self,", "== \"in\": ts = ts * ts * ts * ts * ts", "= self._calc_shapes(array) if (num_steps == 1 or num_states == 1) and self.revert is", "da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\"", "def _interp_color(self, array, num_result): results = [] for colors in array: # item,", "in da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...) 
else: da = da.transpose(item_dim, \"state\",", "if \"item\" in item_dim: if \"batch\" in da.dims: da = da.transpose(item_dim, \"batch\", \"state\",", ".util import is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\",", "if da.ndim > 2: # more than (item, state) if \"grid_item\" in dims:", "- ts elif ease == \"in_out\": ts[index] = 1 - (ts[index] * 2)", "ts[~index] = ( 0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index] *", "0.5 * np.power(2, (20 * ts[index0]) - 10) ts[index1] = -0.5 * np.power(2,", "* ts[index] ts[~index] = ts[~index] - 1 ts[~index] = -8 * ts[~index] *", "ease): if ease == \"in\": ts = 1 - np.sqrt(1 - (ts *", "* ts * ts - ts * np.sin(ts * np.pi)) elif ease ==", "precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of", "name ) elif interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\")", "da.attrs.get(\"ease\") for item_dim in da.dims: if \"item\" in item_dim: if \"batch\" in da.dims:", "= 2 * ts[index] ts[index] = 0.5 * ( ts[index] * ts[index] *", "index0 = ts < 4 / 11 index1 = (ts < 8 /", "if num_states < 10: num_steps = int(np.ceil(60 / num_states)) else: num_steps = int(np.ceil(100", "else: num_steps = self.frames with param.edit_constant(self): self.num_steps = num_steps num_result = (num_states -", "half_way = int(num_result / 2) result = result[:, half_way:-half_way] if num_steps % 2", "2 != 0: result = result[:, :-1] return result def _interp_time( self, array,", "~index1 & ~index0 index3 = ts >= 9 / 10 ts[index0] = 121", "def _prep_xarray(self, da): name = da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for", "np.power(2, (-20 * ts[index1]) + 10) + 1 return ts def _elastic(self, ts,", "pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, 
np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number)", "coords, interp, ease, is_bar, is_errorbar_morph def _prep_array(self, da): array = np.array(da) if array.ndim", "is_str class Easing(param.Parameterized): interp = param.ClassSelector( default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], )", "if is_xarray: result = self._rebuild_da(result, da, dims, coords) return result def _prep_xarray(self, da):", ") num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames", "to the initial state, \" \"traceback backtracks the original path to the initial", "steps, interp, ease, num_states, num_steps, num_items ): array = array.astype(float) result = self._interp_numeric(", "da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar = False if is_xarray: if", "np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args) elif np.issubdtype(array_dtype, np.number) and not is_bar:", "* ts[~index]) + (4 * ts[~index]) - 1 return ts def _cubic(self, ts,", "2 * ts[~index] - 2 ts[~index] = 0.5 * ts[~index] * ts[~index] *", "\"in_out\": ts[index] = 0.5 * (1 - ts[index]) ts[~index] = 0.5 * ts[~index]", "+ 1 return ts def _elastic(self, ts, ease): if ease == \"in\": ts", "\"in\": ts = ts * ts elif ease == \"out\": ts = -(ts", "= np.hstack([result, result_back]) return result def _rebuild_da(self, result, da, dims, coords): if len(dims)", "< 9 / 10) & ~index1 & ~index0 index3 = ts >= 9", "- ts * np.sin(ts * np.pi)) elif ease == \"in_out\": index = ts", "result = np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states * 2)) if", ">= 0.5) & (ts != 1) ts[index0] = 0.5 * np.power(2, (20 *", "result = self._rebuild_da(result, da, dims, coords) return result def _prep_xarray(self, da): name =", "[item_dim, 
\"batch\"]}) da = da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\")", "-8 * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 return ts", "num_items, num_result, name ) elif interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\",", ">= 9 / 10 ts[index0] = 121 * ts[index0] * ts[index0] / 16", "== \"in\": ts = np.sin(13 * np.pi / 2 * ts) * np.power(2,", "backtracks the original path to the initial state, and \" \"rollback is like", "121 * ts[index0] * ts[index0] / 16 ts[index1] = ( (363 / 40.0", "array, num_states, num_steps, num_items, num_result, name ) elif interp == \"fill\" or name.endswith(", "* (ts - 1)) elif ease == \"out\": ts = np.sin(-13 * np.pi", "* ( np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index]) - 1))", "ts != 0 ts[~index] = 0 ts[index] = np.power(2, 10 * (ts[index] -", "the original path to the initial state, and \" \"rollback is like traceback,", "num_steps = int(np.ceil(100 / num_states)) else: num_steps = self.frames with param.edit_constant(self): self.num_steps =", "= 1 - ts elif ease == \"out\": pass elif ease == \"in_out\":", "1 index0 = ts < 4 / 11 index1 = (ts < 8", "* ts[index3]) - (513 / 25.0 * ts[index3]) + 268 / 25.0 )", "= np.hstack([array, array[:, :1]]) return array def _calc_shapes(self, array): num_items, num_states = array.shape", "{EASES}\", precedence=PRECEDENCES[\"interp\"], ) frames = param.Integer( default=None, bounds=(1, None), doc=\"Number of frames between", "item_dim: if \"batch\" in da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...) 
else: da", "1: result = result.squeeze() result = xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, )", "array = array[np.newaxis, :] if self.revert == \"boomerang\": array = np.hstack([array, array[:, :1]])", "\"cubic\" ease = self.ease da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar =", "ts[index] = 2 * ts[index] * ts[index] ts[~index] = (-2 * ts[~index] *", "= ( 0.5 * np.sin(13 * np.pi / 2 * (2 * ts[index]))", "def _prep_array(self, da): array = np.array(da) if array.ndim == 1: array = array[np.newaxis,", "name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims: result = result.unstack().transpose(*dims) return result def", "num_steps, axis=-1) init_nans = np.isnan(init) init[init_nans] = 0 # temporarily fill the nans", "2) result = result[:, half_way:-half_way] if num_steps % 2 != 0: result =", "fill, dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1] -= 1 result[:, indices] =", "if not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else: result", "\"state\" not in da.dims: return da_origin ( da, name, dims, coords, interp, ease,", ") elif interp == \"fill\" or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ):", "* ts * ts * ts elif ease == \"out\": ts = (ts", ":] if self.revert == \"boomerang\": array = np.hstack([array, array[:, :1]]) return array def", "self._interp_color(array, num_result) elif is_bar: result = self._interp_fill(array, num_states, num_steps, name) else: # str", "2 * ((2 * ts[~index] - 1) + 1)) * np.power(2, -10 *", "num_states)) else: num_steps = self.frames with param.edit_constant(self): self.num_steps = num_steps num_result = (num_states", "/ 10.0 * ts[index1]) + 17 / 5.0 ) ts[index2] = ( (4356", "num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps == 1 or num_states == 1)", "name): if result.ndim == 1: result_back = 
result[::-1] else: result_back = result[:, ::-1]", "\"central_longitude\": interp = \"linear\" result = self._interp_numeric(array, *interp_args) elif name in \"c\": #", "da.dims: da = da.transpose(item_dim, \"batch\", \"state\", ...) else: da = da.transpose(item_dim, \"state\", ...)", "result = ( pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, num_steps), ) .T.reindex(indices) .T", "1, num_steps) interp_args = (steps, interp, ease, num_states, num_steps, num_items) array_dtype = array.dtype", "= \"linear\" result = self._interp_numeric(array, *interp_args) elif name in \"c\": # must be", "* ts - ts * np.sin(ts * np.pi)) elif ease == \"in_out\": index", "ts[~index] = 1 - (2 * ts[~index] - 1) ts[~index] = ( 0.5", "8 * ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = ts[~index] -", "* ts) elif ease == \"in_out\": index = ts < 0.5 ts[index] =", "ts[index] ts[index] = 0.5 * ( ts[index] * ts[index] * ts[index] - ts[index]", "1) ts[~index] = ( 0.5 * ( 1 - ( ts[~index] * ts[~index]", "2)) if num_states > 2: result = np.roll(result, num_roll, axis=-1) result = result[:,", "9 / 10) & ~index1 & ~index0 index3 = ts >= 9 /", "np.cos(ts * np.pi)) return ts def _circular(self, ts, ease): if ease == \"in\":", "durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number", "1) and self.revert is None: return da_origin steps = np.linspace(0, 1, num_steps) interp_args", "and not is_bar: if name == \"central_longitude\": interp = \"linear\" result = self._interp_numeric(array,", "1 ) return ts def _exponential(self, ts, ease): if ease == \"in\": index", "interp, ease, is_bar, is_errorbar_morph def _prep_array(self, da): array = np.array(da) if array.ndim ==", "ts[index3]) - (513 / 25.0 * ts[index3]) + 268 / 25.0 ) if", "item_dim in da.dims: if \"item\" in item_dim: if \"batch\" in da.dims: da =", "/ num_states)) else: num_steps = self.frames with 
param.edit_constant(self): self.num_steps = num_steps num_result =", "and not is_errorbar_morph: result = self._interp_first( array, num_states, num_steps, num_items, num_result, name )", "return result def _interp_text(self, array, num_states, num_steps, num_result): result = np.repeat(array, num_steps, axis=-1)", "1 - (ts[index] * 2) ts[~index] = ts[~index] * 2 - 1 index0", "ts[~index] * ts[~index] * ts[~index] - ts[~index] * np.sin(ts[~index] * np.pi) ) )", "ts = -(ts * (ts - 2)) elif ease == \"in_out\": index =", "not name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else: result =", "is_str(array): fill = \"\" dtype = np.object else: fill = 0.0 dtype =", "= 1 - ts elif ease == \"in_out\": ts[index] = 1 - (ts[index]", "* ts[~index] - 1) + 1)) * np.power(2, -10 * (2 * ts[~index]", "* ts[~index] * ts[~index] + 1 return ts def _quartic(self, ts, ease): if", "= array.shape if self.frames is None: if num_states < 10: num_steps = int(np.ceil(60", "1 - (ts * ts * ts - ts * np.sin(ts * np.pi))", "* np.pi / 2 * (2 * ts[index])) * np.power(2, 10 * ((2", "**DEFAULTS[\"num_kwds\"]) num_steps = param.Integer( doc=\"Number of frames between each base state\", **DEFAULTS[\"num_kwds\"] )", "0.5 * np.sin(13 * np.pi / 2 * (2 * ts[index])) * np.power(2,", "np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps = np.tile(steps, (num_states - 1)", ") ts[~index] = 1 - (2 * ts[~index] - 1) ts[~index] = (", "np.sin(-13 * np.pi / 2 * (ts + 1)) * np.power(2, -10 *", "( (54 / 5.0 * ts[index3] * ts[index3]) - (513 / 25.0 *", "ts * np.sin(ts * np.pi) elif ease == \"out\": ts = 1 -", "np.pi) elif ease == \"out\": ts = 1 - ts ts = 1", "in da.dims: return da_origin ( da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph,", "ease): if ease == \"in\": ts = ts * ts elif ease ==", "ease, num_states, num_steps, num_items ) result = conversion(result.ravel()).values result = 
result.reshape(num_items, -1) return", "(20 * ts[index0]) - 10) ts[index1] = -0.5 * np.power(2, (-20 * ts[index1])", "if self.revert == \"boomerang\": array = np.hstack([array, array[:, :1]]) return array def _calc_shapes(self,", "self.revert in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result,", "ts - ts * np.sin(ts * np.pi)) elif ease == \"in_out\": index =", "state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da, name=\"\"): interp =", "< 0.5 ts[index] = 0.5 * (1 - np.sqrt(1 - 4 * (ts[index]", "(1 - np.sqrt(1 - 4 * (ts[index] * ts[index]))) ts[~index] = 0.5 *", "between each base state\", **DEFAULTS[\"num_kwds\"] ) def __init__(self, **kwds): super().__init__(**kwds) def interpolate(self, da,", "import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util import is_str class Easing(param.Parameterized): interp", "_prep_xarray(self, da): name = da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim", "result.unstack().transpose(*dims) return result def _interp_first(self, array, num_states, num_steps, num_items, num_result, name): if is_str(array):", "num_states, num_steps, num_result): result = np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states", "the nans stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop) tiled_steps =", "0.5) & (ts != 1) ts[index0] = 0.5 * np.power(2, (20 * ts[index0])", "== 1 or num_states == 1) and self.revert is None: return da_origin steps", "np.sin(13 * np.pi / 2 * (2 * ts[index])) * np.power(2, 10 *", "= (2 * ts[~index]) - 2 ts[~index] = ( 0.5 * ts[~index] *", ") weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result = stop * weights + init", "= self.ease da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar = False if", "= result.unstack().transpose(*dims) return result def 
_interp_first(self, array, num_states, num_steps, num_items, num_result, name): if", "- np.sqrt(1 - 4 * (ts[index] * ts[index]))) ts[~index] = 0.5 * (", "elif ease == \"in_out\": index = ts < 0.5 ts[index] = 16 *", "= np.isnan(init) init[init_nans] = 0 # temporarily fill the nans stop = np.repeat(array[:,", "da = da.transpose(item_dim, \"state\", ...) break dims = da.dims if da.ndim > 2:", "0 ts[index] = np.power(2, 10 * (ts[index] - 1)) elif ease == \"out\":", "def _rebuild_da(self, result, da, dims, coords): if len(dims) == 1: result = result.squeeze()", "state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))]) result =", "if num_states > 2: result = np.roll(result, num_roll, axis=-1) result = result[:, :num_result]", "in \"c\": # must be after number result = self._interp_color(array, num_result) elif is_bar:", "return result def _interp_color(self, array, num_result): results = [] for colors in array:", "np.power(2, 10 * (ts[index] - 1)) elif ease == \"out\": index = ts", "bounds=(1, None), doc=\"Number of frames between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert =", "np.pi / 2) elif ease == \"in_out\": ts = 0.5 * (1 -", "ease == \"in\": ts = 1 - ts elif ease == \"in_out\": ts[index]", "result = self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64): result = self._interp_time(array, pd.to_timedelta, *interp_args)", "0.5 ) return ts def _bounce(self, ts, ease): index = ts < 0.5", "* 2) ts[~index] = ts[~index] * 2 - 1 index0 = ts <", "ts * ts * ts * ts elif ease == \"out\": ts =", "in [\"traceback\", \"rollback\"]: result = self._apply_revert(result, name) if is_xarray: result = self._rebuild_da(result, da,", "= self._interp_fill(array, num_states, num_steps, name) else: # str result = self._interp_text(array, num_states, num_steps,", "(ts < 8 / 11) & ~index0 index2 = (ts < 9 /", "/ 60, 
result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return result def _rebuild_da(self, result,", "self._interp_numeric( array, steps, interp, ease, num_states, num_steps, num_items ) result = conversion(result.ravel()).values result", "else: half_way = int(num_result / 2) result = result[:, half_way:-half_way] if num_steps %", "= self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args)", "revert = param.ObjectSelector( default=None, objects=REVERTS, doc=\"Method for reverting to the initial state; \"", "+ 1 return ts def _quintic(self, ts, ease): if ease == \"in\": ts", "self.revert == \"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :] result = np.hstack([result,", "* 2)) if num_states > 2: result = np.roll(result, num_roll, axis=-1) result =", "ts, ease): if ease == \"in\": ts = np.sin(13 * np.pi / 2", "def _elastic(self, ts, ease): if ease == \"in\": ts = np.sin(13 * np.pi", "ts def _exponential(self, ts, ease): if ease == \"in\": index = ts !=", "(ts - 1) * (ts - 1) + 1 elif ease == \"in_out\":", "\"state\", ...) else: da = da.transpose(item_dim, \"state\", ...) 
break dims = da.dims if", "return ts def _cubic(self, ts, ease): if ease == \"in\": ts = ts", "\"in_out\": index = ts < 0.5 ts[index] = ( 0.5 * np.sin(13 *", "* ts[~index]) - 1)) + 1 ) return ts def _exponential(self, ts, ease):", "else: result_back = result[:, ::-1] if name == \"duration\" and self.revert == \"rollback\":", "np.repeat(array, num_steps, axis=-1) num_roll = -int(np.ceil(num_steps / num_states * 2)) if num_states >", "traceback, but disregards the \" \"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number", "2 ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index] + 1 return", "\"c\": # must be after number result = self._interp_color(array, num_result) elif is_bar: result", "name == \"central_longitude\": interp = \"linear\" result = self._interp_numeric(array, *interp_args) elif name in", "ts[index] ts[~index] = (2 * ts[~index]) - 2 ts[~index] = ( 0.5 *", "da): array = np.array(da) if array.ndim == 1: array = array[np.newaxis, :] if", "num_states, num_steps, name) else: # str result = self._interp_text(array, num_states, num_steps, num_result) if", "* (ts - 1) * (ts - 1) * (ts - 1) +", "stop * weights + init * (1 - weights) result[init_nans | stop_nans] =", "index = ts < 0.5 ts[index] = 2 * ts[index] * ts[index] ts[~index]", "ease == \"in_out\": ts[index] = 1 - (ts[index] * 2) ts[~index] = ts[~index]", "40.0 * ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1]) + 17", "(-20 * ts[index1]) + 10) + 1 return ts def _elastic(self, ts, ease):", "* ts[index3] * ts[index3]) - (513 / 25.0 * ts[index3]) + 268 /", "name in \"c\": # must be after number result = self._interp_color(array, num_result) elif", "da.dims: if \"item\" in item_dim: if \"batch\" in da.dims: da = da.transpose(item_dim, \"batch\",", "2) elif ease == \"in_out\": ts = 0.5 * (1 - np.cos(ts *", "_interp_time( self, array, conversion, steps, interp, ease, num_states, num_steps, num_items ): array =", "* ts[index])) * np.power(2, 10 * ((2 * 
ts[index]) - 1)) ) ts[~index]", "= param.Integer( default=None, bounds=(1, None), doc=\"Number of frames between each base state\", precedence=PRECEDENCES[\"interp\"],", "- 1) * num_items).reshape( num_items, -1 ) weights = getattr(self, f\"_{interp.lower()}\")(tiled_steps, ease) result", "0.5 ts[index] = 4 * ts[index] * ts[index] * ts[index] ts[~index] = 2", "interp, ease, num_states, num_steps, num_items ) result = conversion(result.ravel()).values result = result.reshape(num_items, -1)", "dims = da.dims if da.ndim > 2: # more than (item, state) if", "ts = 1 - ts ts = 1 - (ts * ts *", "25.0 * ts[index3]) + 268 / 25.0 ) if ease == \"in\": ts", "= 1 - np.sqrt(1 - (ts * ts)) elif ease == \"out\": ts", "2 * ts[index] * ts[index] ts[~index] = (-2 * ts[~index] * ts[~index]) +", "= isinstance(da, xr.DataArray) is_bar = False if is_xarray: if \"state\" not in da.dims:", "num_result = (num_states - 1) * num_steps return num_items, num_states, num_steps, num_result def", "< 0.5 ts[index] = 2 * ts[index] ts[index] = 0.5 * ( ts[index]", "= da.transpose(\"stacked\", \"state\") coords = da.drop_vars(\"state\", errors=\"ignore\").coords is_bar = da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\")", "as np import pandas as pd import param import xarray as xr from", "1) + 1)) * np.power(2, -10 * (2 * ts[~index] - 1)) +", "ts[index] = 1 - (ts[index] * 2) ts[~index] = ts[~index] * 2 -", "* np.pi)) return ts def _circular(self, ts, ease): if ease == \"in\": ts", "* ts[index] * ts[index] - ts[index] * np.sin(ts[index] * np.pi) ) ts[~index] =", "stop_nans] = np.nan # replace nans return result def _linear(self, ts, ease): return", "initial state, \" \"traceback backtracks the original path to the initial state, and", "return ts def _quartic(self, ts, ease): if ease == \"in\": ts = ts", "(ts + 1)) * np.power(2, -10 * ts) + 1 elif ease ==", "ts[~index] = ts[~index] * 2 - 1 index0 = ts < 4 /", "not is_bar: if name == \"central_longitude\": interp = \"linear\" 
result = self._interp_numeric(array, *interp_args)", "ts[~index] * ts[~index] * ts[~index] + 1 ) return ts def _sine(self, ts,", "the initial state, \" \"traceback backtracks the original path to the initial state,", "import xarray as xr from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS,", "* (2 * ts[~index] - 1)) + 2 ) return ts def _back(self,", "* ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1 )", "- 1 ts[~index] = -8 * ts[~index] * ts[~index] * ts[~index] * ts[~index]", "result_back.shape[-1])[np.newaxis, :] result = np.hstack([result, result_back]) return result def _rebuild_da(self, result, da, dims,", "num_steps, num_result = self._calc_shapes(array) if (num_steps == 1 or num_states == 1) and", "ts[index] = np.power(2, 10 * (ts[index] - 1)) elif ease == \"out\": index", "da, name=\"\"): interp = self.interp or \"cubic\" ease = self.ease da_origin = da.copy()", "np.pi / 2 * (ts + 1)) * np.power(2, -10 * ts) +", "or name.endswith( (\"zoom\", \"discrete_trail\", \"morph_trail\", \"tick_label\", \"bar_label\") ): result = self._interp_fill(array, num_states, num_steps,", "( pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, num_steps), ) .T.reindex(indices) .T ) if", "array, steps, interp, ease, num_states, num_steps, num_items ): init = np.repeat(array[:, :-1], num_steps,", "np.power(2, -10 * ts) + 1 elif ease == \"in_out\": index = ts", "= np.linspace(0, 1, num_steps) interp_args = (steps, interp, ease, num_states, num_steps, num_items) array_dtype", "- (99 / 10.0 * ts[index1]) + 17 / 5.0 ) ts[index2] =", "= self._prep_array(da) num_items, num_states, num_steps, num_result = self._calc_shapes(array) if (num_steps == 1 or", "(ts - 1)) elif ease == \"out\": ts = np.sin(-13 * np.pi /", "== 1: result = result.squeeze() result = xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs,", "coords): if len(dims) == 1: result = result.squeeze() result = 
xr.DataArray( result, dims=da.dims,", "* ( ts[index] * ts[index] * ts[index] - ts[index] * np.sin(ts[index] * np.pi)", "= ts[~index] * 2 - 1 index0 = ts < 4 / 11", "ts[index2] = ( (4356 / 361.0 * ts[index2] * ts[index2]) - (35442 /", "doc=\"Number of frames between each base state\", precedence=PRECEDENCES[\"interp\"], ) revert = param.ObjectSelector( default=None,", "return result def _interp_time( self, array, conversion, steps, interp, ease, num_states, num_steps, num_items", "is_errorbar_morph def _prep_array(self, da): array = np.array(da) if array.ndim == 1: array =", "= ts != 1 ts[~index] = 1 ts[index] = 1 - np.power(2, -10", "(2 * ts[index])) * np.power(2, 10 * ((2 * ts[index]) - 1)) )", "num_states, num_steps, num_items, num_result, name ) elif interp == \"fill\" or name.endswith( (\"zoom\",", "num_states, num_steps, num_result def _apply_revert(self, result, name): if result.ndim == 1: result_back =", "temporarily fill the nans stop = np.repeat(array[:, 1:], num_steps, axis=-1) stop_nans = np.isnan(stop)", "- ts[index] * np.sin(ts[index] * np.pi) ) ts[~index] = 1 - (2 *", "ts[index] = 4 * ts[index] * ts[index] * ts[index] ts[~index] = 2 *", "= da.name interp = da.attrs.get(\"interp\") ease = da.attrs.get(\"ease\") for item_dim in da.dims: if", "\"original's path durations\", precedence=PRECEDENCES[\"interp\"], ) num_states = param.Integer(doc=\"Number of states\", **DEFAULTS[\"num_kwds\"]) num_steps =", "if name == \"duration\" and self.revert == \"rollback\": result_back = np.repeat(1 / 60,", "1) * num_steps return num_items, num_states, num_steps, num_result def _apply_revert(self, result, name): if", "result_back]) return result def _rebuild_da(self, result, da, dims, coords): if len(dims) == 1:", "11 index1 = (ts < 8 / 11) & ~index0 index2 = (ts", "is_bar, is_errorbar_morph def _prep_array(self, da): array = np.array(da) if array.ndim == 1: array", "da.dims if da.ndim > 2: # more than (item, state) if \"grid_item\" in", "name, dims, coords, interp, 
ease, is_bar, is_errorbar_morph def _prep_array(self, da): array = np.array(da)", "ts)) elif ease == \"out\": ts = np.sqrt((2 - ts) * ts) elif", "self.ease da_origin = da.copy() is_xarray = isinstance(da, xr.DataArray) is_bar = False if is_xarray:", "(ts - 1) * (ts - 1) * (ts - 1) + 1", "\"in_out\": index = ts < 0.5 ts[index] = 4 * ts[index] * ts[index]", "is_bar: result = self._interp_fill(array, num_states, num_steps, name) else: # str result = self._interp_text(array,", "result = np.array(results) return result def _interp_text(self, array, num_states, num_steps, num_result): result =", "5.0 ) ts[index2] = ( (4356 / 361.0 * ts[index2] * ts[index2]) -", "else: fill = 0.0 dtype = None result = np.full((num_items, num_result), fill, dtype=dtype)", "& (ts != 1) index1 = (ts != 0) & (ts >= 0.5)", "if is_xarray: if \"state\" not in da.dims: return da_origin ( da, name, dims,", "num_items, num_states = array.shape if self.frames is None: if num_states < 10: num_steps", "* ts[index]))) ts[~index] = 0.5 * ( np.sqrt(-((2 * ts[~index]) - 3) *", "(ts - 1) + 1 elif ease == \"in_out\": index = ts <", "\"in\": ts = np.sin(13 * np.pi / 2 * ts) * np.power(2, 10", "num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype,", "array.ndim == 1: array = array[np.newaxis, :] if self.revert == \"boomerang\": array =", "name == \"duration\" and self.revert == \"rollback\": result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis,", "self._interp_fill(array, num_states, num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args) elif", "np.pi / 2 * ((2 * ts[~index] - 1) + 1)) * np.power(2,", "* np.pi) ) ts[~index] = 1 - (2 * ts[~index] - 1) ts[~index]", "default=None, class_=Iterable, doc=f\"Interpolation method; {INTERPS}\", precedence=PRECEDENCES[\"interp\"], ) ease = param.ClassSelector( 
default=\"in_out\", class_=Iterable, doc=f\"Type", "* np.pi) elif ease == \"out\": ts = 1 - ts ts =", ":-1] return result def _interp_time( self, array, conversion, steps, interp, ease, num_states, num_steps,", "== \"in\": ts = ts * ts elif ease == \"out\": ts =", "np.sin(ts[~index] * np.pi) ) ) + 0.5 ) return ts def _bounce(self, ts,", "/ 2 * (ts + 1)) * np.power(2, -10 * ts) + 1", "_elastic(self, ts, ease): if ease == \"in\": ts = np.sin(13 * np.pi /", "np.arange(num_states * num_steps - num_steps) result = ( pd.DataFrame( array, columns=np.arange(0, num_states *", "ts[index] * ts[index] ts[~index] = (2 * ts[~index]) - 2 ts[~index] = (", "# replace nans return result def _linear(self, ts, ease): return ts def _quadratic(self,", "ease == \"out\": ts = np.sqrt((2 - ts) * ts) elif ease ==", "elif ease == \"out\": index = ts != 1 ts[~index] = 1 ts[index]", "elif ease == \"in_out\": index = ts < 0.5 ts[index] = 2 *", "- 1 index0 = ts < 4 / 11 index1 = (ts <", "index = ts < 0.5 ts[index] = ( 0.5 * np.sin(13 * np.pi", "2: # more than (item, state) if \"grid_item\" in dims: da = da.stack({\"stacked\":", "< 4 / 11 index1 = (ts < 8 / 11) & ~index0", "import numpy as np import pandas as pd import param import xarray as", "= 121 * ts[index0] * ts[index0] / 16 ts[index1] = ( (363 /", "= np.full((num_items, num_result), fill, dtype=dtype) indices = np.arange(num_states) * num_steps indices[-1] -= 1", "is_bar: if name == \"central_longitude\": interp = \"linear\" result = self._interp_numeric(array, *interp_args) elif", "da.transpose(item_dim, \"batch\", \"state\", ...) else: da = da.transpose(item_dim, \"state\", ...) 
break dims =", "ts elif ease == \"out\": ts = -(ts * (ts - 2)) elif", "= 1 - np.power(2, -10 * ts[index]) elif ease == \"in_out\": index0 =", "num_steps, name) else: # str result = self._interp_text(array, num_states, num_steps, num_result) if self.revert", "ts < 0.5 ts[index] = ( 0.5 * np.sin(13 * np.pi / 2", "steps, interp, ease, num_states, num_steps, num_items ): init = np.repeat(array[:, :-1], num_steps, axis=-1)", "LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS from .util import", "elif ease == \"out\": pass elif ease == \"in_out\": ts[index] = 0.5 *", "* ts elif ease == \"out\": ts = (ts - 1) * (ts", "interp, ease, num_states, num_steps, num_items ): array = array.astype(float) result = self._interp_numeric( array,", "< 0.5 ts[index] = 4 * ts[index] * ts[index] * ts[index] ts[~index] =", "ease == \"in_out\": index = ts < 0.5 ts[index] = 4 * ts[index]", "is None: if num_states < 10: num_steps = int(np.ceil(60 / num_states)) else: num_steps", "= np.array(results) return result def _interp_text(self, array, num_states, num_steps, num_result): result = np.repeat(array,", "ts[~index]) - 3) * ((2 * ts[~index]) - 1)) + 1 ) return", "1)) ) ts[~index] = 0.5 * ( np.sin(-13 * np.pi / 2 *", "0.5 ts[index] = 0.5 * (1 - np.sqrt(1 - 4 * (ts[index] *", "= 0 # temporarily fill the nans stop = np.repeat(array[:, 1:], num_steps, axis=-1)", "num_steps, name) elif np.issubdtype(array_dtype, np.datetime64): result = self._interp_time(array, pd.to_datetime, *interp_args) elif np.issubdtype(array_dtype, np.timedelta64):", "* ts[~index] * ts[~index] * ts[~index] + 1 return ts def _quartic(self, ts,", "def interpolate(self, da, name=\"\"): interp = self.interp or \"cubic\" ease = self.ease da_origin", "= (ts < 9 / 10) & ~index1 & ~index0 index3 = ts", "from matplotlib.colors import LinearSegmentedColormap, rgb2hex from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS", "= result.squeeze() result = 
xr.DataArray( result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\"", "ts[~index] * ts[~index] * ts[~index] + 1 return ts def _quintic(self, ts, ease):", "* (ts - 1) * (ts - 1) * (1 - ts) +", "the initial state, and \" \"rollback is like traceback, but disregards the \"", "ts def _circular(self, ts, ease): if ease == \"in\": ts = 1 -", "* (ts - 1) * (ts - 1) * (ts - 1) *", "ts[index] * ts[index] * ts[index] ts[~index] = ts[~index] - 1 ts[~index] = -8", "16 * ts[index] * ts[index] * ts[index] * ts[index] * ts[index] ts[~index] =", "name.endswith(\"discrete_trail\"): result = result.ffill(axis=1).fillna(\"\").values result[:, -1] = array[:, -1] else: result = result.values", "* (1 - np.sqrt(1 - 4 * (ts[index] * ts[index]))) ts[~index] = 0.5", "((2 * ts[index]) - 1)) ) ts[~index] = 0.5 * ( np.sin(-13 *", "if ease == \"in\": ts = np.sin(13 * np.pi / 2 * ts)", "== \"in_out\": index = ts < 0.5 ts[index] = 2 * ts[index] *", "* ts[index] ts[~index] = (2 * ts[~index]) - 2 ts[~index] = ( 0.5", "_interp_first(self, array, num_states, num_steps, num_items, num_result, name): if is_str(array): fill = \"\" dtype", "# must be after number result = self._interp_color(array, num_result) elif is_bar: result =", "ts[index] * ts[index] * ts[index] * ts[index] ts[~index] = (2 * ts[~index]) -", "is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph def", "num_states * num_steps, num_steps), ) .T.reindex(indices) .T ) if not name.endswith(\"discrete_trail\"): result =", "* ts[index3]) + 268 / 25.0 ) if ease == \"in\": ts =", "_quadratic(self, ts, ease): if ease == \"in\": ts = ts * ts elif", "* np.power(2, 10 * (ts - 1)) elif ease == \"out\": ts =", "np.sin(ts[index] * np.pi) ) ts[~index] = 1 - (2 * ts[~index] - 1)", "elif name in \"c\": # must be after number result = self._interp_color(array, num_result)", "= ( pd.DataFrame( array, columns=np.arange(0, num_states * num_steps, 
num_steps), ) .T.reindex(indices) .T )", "num_steps) interp_args = (steps, interp, ease, num_states, num_steps, num_items) array_dtype = array.dtype if", "array, num_result): results = [] for colors in array: # item, state cmap", "coords=coords, name=da.name, attrs=da.attrs, ) if \"stacked\" in result.dims: result = result.unstack().transpose(*dims) return result", "= ( (4356 / 361.0 * ts[index2] * ts[index2]) - (35442 / 1805.0", "ease): if ease == \"in\": ts = ts * ts * ts elif", "< 0.5) & (ts != 1) index1 = (ts != 0) & (ts", "array[:, -1] else: result = result.values return result def _interp_color(self, array, num_result): results", "- ts[~index] * np.sin(ts[~index] * np.pi) ) ) + 0.5 ) return ts", "ease == \"in_out\": index = ts < 0.5 ts[index] = 0.5 * (1", "coords) return result def _prep_xarray(self, da): name = da.name interp = da.attrs.get(\"interp\") ease", "array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb in", "= ts < 0.5 ts[index] = 4 * ts[index] * ts[index] * ts[index]", "= da.attrs.get(\"is_bar\") is_errorbar_morph = da.attrs.get(\"is_errorbar_morph\") return da, name, dims, coords, interp, ease, is_bar,", "np.sqrt(1 - (ts * ts)) elif ease == \"out\": ts = np.sqrt((2 -", "= np.sin((ts - 1) * np.pi / 2) + 1 elif ease ==", "in array: # item, state cmap = LinearSegmentedColormap.from_list(\"eased\", colors, N=num_result) results.append([rgb2hex(rgb) for rgb" ]
[ "# if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True #", "request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group, # 'post_list':", "post_edit(request, username, post_id): # '''Страница редактирования публикации''' # title = 'Редактировать запись' #", "для добавления комментария к публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.method", "comment = form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', )", "self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author,", "= super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required # def post_edit(request, username,", "follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following", "== 'POST': # form = CommentForm(request.POST) # if form.is_valid(): # comment = form.save(commit=False)", "request.user # comment.save() # return redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username)", "from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator from", "context_object_name = 'comments' @property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following',", 
"author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10)", "# 'post_list': page, # 'paginator': paginator}) class GroupPostView(ListView): model = Post template_name =", "username=username) @login_required def delete_comment(request, username, post_id, comment_id): '''Функция для удаления комментария к публикации'''", "# else: # form = PostForm() # return render(request, 'new_post.html', {'form': form}) #", "= get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate(", "extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) #", "if request.user == comment.author: # if request.method == 'POST': # form = CommentForm(request.POST,", "Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator =", "following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required #", "} def get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def", "= True # return render(request, 'post_view.html', {'post': post, # 'profile': user_profile, # 'comments':", ") return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required", "group = get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( # 
group=group).select_related( # 'author', 'group').annotate(", "# paginator = Paginator(post_list, 10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number)", "paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): return { 'group': get_object_or_404(Group,", "if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return { 'profile': profile, 'following': following }", "'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number = request.GET.get('page') # page =", "{ 'title': 'Редактировать запись' } def form_valid(self, form): post = form.save(commit=False) post.pub_date =", "return redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model =", "'title': 'Создать новою запись' } def form_valid(self, form): post = form.save(commit=False) post.author =", "# '''Страница с публикациями избранных пользователей''' # follow_page = True # post_list =", "return { 'profile': profile, 'following': following } def get_queryset(self): profile = get_object_or_404( User.objects.filter(", "following}) class ProfileView(ListView): model = Post template_name = 'profile.html' paginate_by = 5 context_object_name", "return render( # request, \"new_post.html\", {'form': form, 'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin,", "PostForm template_name = 'new_post.html' extra_context = { 'title': 'Редактировать запись' } def form_valid(self,", "return reverse('index') # def post_view(request, post_id, username): # '''Страница отдельной публикации''' # user_profile", "# def post_delete(request, username, post_id): # '''Функция для удаления публикации''' # post =", "= 'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id' success_message = 'Запись удалена' def", "отписки от пользователя''' follover = 
Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required def", "# @login_required # def edit_comment(request, username, post_id, comment_id): # '''Функция для редактирования комментария", "{'form': form, 'title': title}) # form = GroupForm() # return render(request, \"new_post.html\", {'form':", "form = GroupForm() # return render(request, \"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin,", "distinct=True))) # post = get_object_or_404( # Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'), #", "distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if", "None, # files=request.FILES or None, # instance=post) # if form.is_valid(): # post =", "# return redirect('post', post_id=post.pk, username=username) # else: # form = PostForm(instance=post) # else:", "get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post comment.author = self.request.user comment.save() return", "@login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class = PostForm template_name = 'new_post.html'", "# user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following',", "post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name = 'comments.html' form_class =", "= form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена' )", "'follow.html' paginate_by = 5 context_object_name = 'post_list' extra_context = { 'follow_page': True }", "post_id=post_id) # 
form = CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': form, 'title': title})", "True # post_list = Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( #", "{ 'profile': profile, 'following': following } def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate(", "публикации''' # title = 'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if", "User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile,", "= False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following =", "с публикиями связанными с группой''' # group = get_object_or_404(Group, slug=slug) # post_list =", "page = paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group, # 'post_list': page, #", "@login_required # def add_comment(request, username, post_id): # '''Функция для добавления комментария к публикации'''", "'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True", "= True return {'post': post, 'profile': user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self):", "model = Post template_name = 'follow.html' paginate_by = 5 context_object_name = 'post_list' extra_context", "Comment template_name = 'comments.html' form_class = CommentForm slug_field = 'username' pk_url_kwarg = 'post_id'", "= 'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context = { 'title': 'Редактировать", "if 
request.method == 'POST': # form = CommentForm(request.POST, instance=comment) # if form.is_valid(): #", "Post template_name = 'index.html' paginate_by = 5 context_object_name = 'post_list' extra_context = {", "redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post", "form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return", "= GroupForm(request.POST) # if form.is_valid(): # slug = form.cleaned_data['slug'] # form.save() # return", "paginate_by = 5 context_object_name = 'post_list' extra_context = { 'follow_page': True } def", "GroupForm(request.POST) # if form.is_valid(): # slug = form.cleaned_data['slug'] # form.save() # return redirect(\"group\",", "def form_valid(self, form): comment = form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS,", "= super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница с публикациями", "return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def new_post(request): # '''Страница создания", "публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.method == 'POST': # form", "return render(request, \"new_post.html\", {'form': form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment", "get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return render(request,", "timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def get_success_url(self): 
return", "'Редактировать комментарий' # comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user == comment.author:", "= False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post': post,", "files=request.FILES or None, # instance=post) # if form.is_valid(): # post = form.save(commit=False) #", "# return redirect('index') # else: # form = PostForm() # return render(request, 'new_post.html',", "LoginRequiredMixin from django.core.paginator import Paginator from django.db.models import Count from django.shortcuts import get_object_or_404,", "# return render(request, 'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model =", "= PostForm template_name = 'new_post.html' extra_context = { 'title': 'Создать новою запись' }", "import reverse from django.utils import timezone from requests import request from .forms import", "для удаления комментария к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user ==", "# following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): #", "form = CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': form, 'title': title}) class CommentEditView(LoginRequiredMixin,", "# 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form", "return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required # def post_edit(request, username, post_id): #", "DeleteView): model = Post template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id'", "add_group(request): # '''Страница для добавления группы''' # title = 'Создать группу' # if", "else: # form = 
PostForm() # return render(request, 'new_post.html', {'form': form}) # @login_required", "} # def group_posts(request, slug): # '''Страница с публикиями связанными с группой''' #", "# if request.method == 'POST': # form = CommentForm(request.POST) # if form.is_valid(): #", "'author', 'group') # @login_required # def new_post(request): # '''Страница создания новой публикации''' #", "files=request.FILES or None) # if form.is_valid(): # post = form.save(commit=False) # post.author =", "username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'post_delete.html' slug_field = 'username'", "'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number", "return render(request, 'post_view.html', {'post': post, # 'profile': user_profile, # 'comments': post_comment, # 'form':", "def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set =", "self.request.user comment.save() return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk })", "'''Страница с публикациями избранных пользователей''' # follow_page = True # post_list = Post.objects.filter(", "}) # @login_required # def add_group(request): # '''Страница для добавления группы''' # title", "5 context_object_name = 'post_list' @property def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower',", "post_id=post_id) # @login_required # def edit_comment(request, username, post_id, comment_id): # '''Функция для редактирования", "post_id): # '''Страница редактирования публикации''' # title = 'Редактировать запись' # post =", "request.user 
== comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required # def edit_comment(request,", "from .forms import CommentForm, GroupForm, PostForm from .models import Comment, Follow, Group, Post,", "# '''Страница с публикиями связанными с группой''' # group = get_object_or_404(Group, slug=slug) #", "timezone.now() # comment.save() # return redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment) #", "# following = True # return render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page,", "paginator}) class GroupPostView(ListView): model = Post template_name = 'group.html' paginate_by = 5 context_object_name", "class GroupPostView(ListView): model = Post template_name = 'group.html' paginate_by = 5 context_object_name =", "distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True))) # post = get_object_or_404( # Post.objects.annotate(", "= Post slug_field = 'username' pk_url_kwarg = 'post_id' form_class = PostForm template_name =", "form = PostForm(request.POST or None, # files=request.FILES or None, # instance=post) # if", "'Создать группу' } def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception):", "form.save(commit=False) # post.pub_date = timezone.now() # post.save() # return redirect('post', post_id=post.pk, username=username) #", "= self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена' ) return super().form_valid(form) def", "form = CommentForm(request.POST) # if form.is_valid(): # comment = form.save(commit=False) # comment.post =", "= Post template_name = 'follow.html' paginate_by = 5 context_object_name = 'post_list' extra_context =", "'username' pk_url_kwarg = 'post_id' form_class = PostForm template_name = 'new_post.html' extra_context = {", "= 
Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator", "# return redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView):", "# comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user == comment.author: # if", "== 'POST': # form = GroupForm(request.POST) # if form.is_valid(): # slug = form.cleaned_data['slug']", "запись' } def form_valid(self, form): post = form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message(", "self.object.author, }) # @login_required # def post_delete(request, username, post_id): # '''Функция для удаления", "form.is_valid(): # post = form.save(commit=False) # post.pub_date = timezone.now() # post.save() # return", "def delete_comment(request, username, post_id, comment_id): '''Функция для удаления комментария к публикации''' comment =", "form_class = GroupForm extra_context = { 'title': 'Создать группу' } def get_success_url(self, **kwargs):", "} def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404'''", "redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model =", "if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post': post, 'profile': user_profile, 'form': CommentForm(),", "# @login_required # def follow_index(request): # '''Страница с публикациями избранных пользователей''' # follow_page", "form.is_valid(): # comment = form.save(commit=False) # comment.created = timezone.now() # comment.save() # return", "CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': 
form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model", "form.save(commit=False) # post.author = request.user # post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая", "'Запись удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required #", "# follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True))) # post = get_object_or_404(", "return redirect('profile', username=username) @login_required def delete_comment(request, username, post_id, comment_id): '''Функция для удаления комментария", "follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request, username, post_id, comment_id): '''Функция для удаления", "template_name = 'index.html' paginate_by = 5 context_object_name = 'post_list' extra_context = { 'index_page':", "= get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author: comment.delete() return redirect('post', username=username, post_id=post_id)", "GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name = 'new_post.html' form_class = GroupForm extra_context =", "# post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author: # if request.method", "= False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return { 'profile':", "= form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form':", "get_object_or_404, redirect, render from django.urls import reverse from django.utils import timezone from requests", "template_name = 'group.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): return", "= 'Редактировать комментарий' # 
comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user ==", "публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.user == post.author: # post.delete()", "= 'post_list' @property def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self,", "render(request, \"group.html\", {'group': group, # 'post_list': page, # 'paginator': paginator}) class GroupPostView(ListView): model", "self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return { 'profile': profile, 'following': following", "запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author: # if", "return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница с публикациями пользователя''' #", "model = Post template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id' success_message", "django.db.models import Count from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse", "username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name = 'comments.html' form_class = CommentForm", "form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form': form,", "\"POST\": # form = PostForm(request.POST or None, # files=request.FILES or None, # instance=post)", "model = Comment template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context", "# if request.method == 'POST': # form = PostForm(request.POST, files=request.FILES or None) #", "# post_list = Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( # 
'commented_post')).order_by(\"-pub_date\").all()", "template_name = 'new_post.html' form_class = GroupForm extra_context = { 'title': 'Создать группу' }", "post.author: # if request.method == \"POST\": # form = PostForm(request.POST or None, #", "CommentForm(), 'following': following} def get_queryset(self): query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def", "slug=slug) # post_list = Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( #", "PostForm(instance=post) # else: # return redirect('post', post_id=post.pk, username=post.author) # return render( # request,", "# '''Страница отдельной публикации''' # user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True),", "= Comment template_name = 'comments.html' form_class = CommentForm slug_field = 'username' pk_url_kwarg =", "django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse from django.utils import timezone", "class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field = 'username' pk_url_kwarg = 'post_id' form_class", "= { 'title': 'Редактировать запись' } def form_valid(self, form): post = form.save(commit=False) post.pub_date", "self.object.author, }) # @login_required # def add_comment(request, username, post_id): # '''Функция для добавления", "author=profile).select_related( 'group', 'author') # @login_required # def post_edit(request, username, post_id): # '''Страница редактирования", "ProfileView(ListView): model = Post template_name = 'profile.html' paginate_by = 5 context_object_name = 'post_list'", "{'page': page, # 'paginator': paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model =", "# '''Функция для добавления комментария к публикации''' # post = get_object_or_404(Post, pk=post_id) #", 
"добавлена', extra_tags='success' # ) # return redirect('index') # else: # form = PostForm()", "10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # following = False", "= { 'follow_page': True } def get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related(", "# form = PostForm(request.POST or None, # files=request.FILES or None, # instance=post) #", "get_object_or_404(Post, pk=post_id) # if request.user == post.author: # post.delete() # return redirect('profile', username=username)", "= get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user == comment.author: # if request.method ==", ".forms import CommentForm, GroupForm, PostForm from .models import Comment, Follow, Group, Post, User", "# form = CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': form, 'title': title}) class", "post = form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success'", "form.is_valid(): # comment = form.save(commit=False) # comment.post = post # comment.author = request.user", "'''Страница создания новой публикации''' # if request.method == 'POST': # form = PostForm(request.POST,", "# return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name =", "= { 'title': 'Создать новою запись' } def form_valid(self, form): post = form.save(commit=False)", "post.save() # return redirect('post', post_id=post.pk, username=username) # else: # form = PostForm(instance=post) #", "user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True)))", "profile = get_object_or_404( User.objects.filter( 
username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False if", "# comment.author = request.user # comment.save() # return redirect('post', post_id=post_id, username=username) # return", "request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # ) # return redirect('index') # else:", "post_id=post.pk, username=post.author) # return render( # request, \"new_post.html\", {'form': form, 'title': title, 'post':", "extra_context = { 'title': 'Редактировать комментарий' } def form_valid(self, form): comment = form.save(commit=False)", "# post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # )", "публикиями связанными с группой''' # group = get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter(", "# post_count=Count('post_author', distinct=True))) # post = get_object_or_404( # Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author',", "# return render(request, \"new_post.html\", {'form': form, 'title': title}) # form = GroupForm() #", "Paginator from django.db.models import Count from django.shortcuts import get_object_or_404, redirect, render from django.urls", "post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return", "User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author',", "'post_list' @property def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', 
distinct=True), following_count=Count('following', distinct=True)))", "= get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated:", "PostForm from .models import Comment, Follow, Group, Post, User from django.views.generic import (ListView,", "# page = paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group, # 'post_list': page,", "comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def", "# slug = form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug) # return render(request,", "self.object.author, 'post_id': self.object.post.pk }) # @login_required # def follow_index(request): # '''Страница с публикациями", "публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author: comment.delete() return redirect('post',", "messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # ) # return redirect('index') # else: #", "username): '''Функция для отписки от пользователя''' follover = Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile',", "{'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name = 'new_post.html'", "post_id): # '''Функция для удаления публикации''' # post = get_object_or_404(Post, pk=post_id) # if", "or None, # files=request.FILES or None, # instance=post) # if form.is_valid(): # post", "class IndexListView(ListView): model = Post template_name = 'index.html' paginate_by = 5 context_object_name =", "# if form.is_valid(): # post = form.save(commit=False) # post.author = request.user # post.save()", "form, 'title': title}) class 
GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name = 'new_post.html' form_class", "from django.core.paginator import Paginator from django.db.models import Count from django.shortcuts import get_object_or_404, redirect,", "Post slug_field = 'username' pk_url_kwarg = 'post_id' form_class = PostForm template_name = 'new_post.html'", "= form.save(commit=False) # post.author = request.user # post.save() # messages.add_message( # request, messages.SUCCESS,", "page = paginator.get_page(page_number) # following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile,", "if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required def", "'comments.html' form_class = CommentForm slug_field = 'username' pk_url_kwarg = 'post_id' def form_valid(self, form):", "render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request): '''Страница 500''' return render(request, \"misc/500.html\", status=500)", "} def get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group')", "query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required # def post_edit(request,", "'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name = 'new_post.html' form_class =", "# comment.post = post # comment.author = request.user # comment.save() # return redirect('post',", "username, post_id, comment_id): '''Функция для удаления комментария к публикации''' comment = get_object_or_404(Comment, post=post_id,", "username=username) if followed_author == request.user: return redirect('profile', username=username) if 
Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile',", "form_valid(self, form): post = form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись", "following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate( # comment_count=Count(", "# return render(request, \"group.html\", {'group': group, # 'post_list': page, # 'paginator': paginator}) class", "super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница с публикациями пользователя'''", "distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate( # comment_count=Count( #", "Paginator(post_list, 10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # following =", "# comment.save() # return redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment) # return", "@login_required # def add_group(request): # '''Страница для добавления группы''' # title = 'Создать", "class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg", "follow_page = True # post_list = Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( #", "timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self):", "= 'comment_id' extra_context = { 'title': 'Редактировать комментарий' } def form_valid(self, form): comment", "render from django.urls import reverse from django.utils import timezone from requests import request", "= 'new_post.html' extra_context = { 'title': 
'Редактировать запись' } def form_valid(self, form): post", "# if request.user == comment.author: # if request.method == 'POST': # form =", "return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set = super().get_queryset()", "user_profile, # 'comments': post_comment, # 'form': form, # 'following': following}) class PostView(ListView): model", "= GroupForm() # return render(request, \"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView):", "публикации''' # if request.method == 'POST': # form = PostForm(request.POST, files=request.FILES or None)", "get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) #", "*args, **kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required #", "комментария к публикации''' # title = 'Редактировать комментарий' # comment = get_object_or_404(Comment, post=post_id,", "page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group,", "username=username) # else: # form = PostForm(instance=post) # else: # return redirect('post', post_id=post.pk,", "if form.is_valid(): # comment = form.save(commit=False) # comment.post = post # comment.author =", "class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg", "# post = form.save(commit=False) # post.pub_date = timezone.now() # post.save() # return redirect('post',", "добавлена' ) return super().form_valid(form) def get_success_url(self): return reverse('index') # def post_view(request, post_id, username):", "return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username': 
self.object.author, }) # @login_required #", "reverse('index') # def post_view(request, post_id, username): # '''Страница отдельной публикации''' # user_profile =", "comment = form.save(commit=False) comment.post = post comment.author = self.request.user comment.save() return super().form_valid(form) def", "Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required def profile_unfollow(request, username): '''Функция для отписки от", "'username': self.object.author, }) # @login_required # def post_delete(request, username, post_id): # '''Функция для", "user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author',", "form = PostForm() # return render(request, 'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin,", "pk=post_id) # if request.user == post.author: # post.delete() # return redirect('profile', username=username) #", "'username' pk_url_kwarg = 'post_id' def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment =", "redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username)", "# form = GroupForm(request.POST) # if form.is_valid(): # slug = form.cleaned_data['slug'] # form.save()", "# post = get_object_or_404(Post, pk=post_id) # if request.user == post.author: # post.delete() #", "def form_valid(self, form): post = form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS,", "# request, \"new_post.html\", {'form': form, 'title': title, 'post': post}) class 
PostEditUpdateView(LoginRequiredMixin, UpdateView): model", "distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following =", "= 'post_id' success_message = 'Запись удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author,", "публикациями избранных пользователей''' # follow_page = True # post_list = Post.objects.filter( # author__following__user=request.user).select_related(", "# post.save() # return redirect('post', post_id=post.pk, username=username) # else: # form = PostForm(instance=post)", "None, # instance=post) # if form.is_valid(): # post = form.save(commit=False) # post.pub_date =", "# def post_edit(request, username, post_id): # '''Страница редактирования публикации''' # title = 'Редактировать", "get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset()", "template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context = { 'title':", "return redirect('profile', username=username) @login_required def profile_unfollow(request, username): '''Функция для отписки от пользователя''' follover", "'title': 'Редактировать запись' } def form_valid(self, form): post = form.save(commit=False) post.pub_date = timezone.now()", "template_name = 'post_view.html' context_object_name = 'comments' @property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate(", "'Создать новою запись' } def form_valid(self, form): post = form.save(commit=False) post.author = self.request.user", "# def add_group(request): # '''Страница для добавления группы''' # title = 'Создать группу'", "# 
pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() #", "instance=post) # if form.is_valid(): # post = form.save(commit=False) # post.pub_date = timezone.now() #", "= get_object_or_404(User, username=username) if followed_author == request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists():", "# '''Функция для редактирования комментария к публикации''' # title = 'Редактировать комментарий' #", "CommentForm pk_url_kwarg = 'comment_id' extra_context = { 'title': 'Редактировать комментарий' } def form_valid(self,", "from django.utils import timezone from requests import request from .forms import CommentForm, GroupForm,", "'comments' @property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author',", "# if request.method == \"POST\": # form = PostForm(request.POST or None, # files=request.FILES", "= 'post_view.html' context_object_name = 'comments' @property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower',", "messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена' ) return super().form_valid(form) def get_success_url(self): return reverse('index')", "с группой''' # group = get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( # group=group).select_related(", "get_object_or_404(Post, pk=post_id) # if request.method == 'POST': # form = CommentForm(request.POST) # if", "@login_required def delete_comment(request, username, post_id, comment_id): '''Функция для удаления комментария к публикации''' comment", "удаления публикации''' # post = get_object_or_404(Post, 
pk=post_id) # if request.user == post.author: #", "render(request, \"new_post.html\", {'form': form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name", "# '''Страница для добавления группы''' # title = 'Создать группу' # if request.method", "delete_comment(request, username, post_id, comment_id): '''Функция для удаления комментария к публикации''' comment = get_object_or_404(Comment,", "'profile': user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self): query_set = super().get_queryset() return query_set.filter(", "import Paginator from django.db.models import Count from django.shortcuts import get_object_or_404, redirect, render from", "публикациями пользователя''' # user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True),", "# return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name =", "= get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if", "paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name = 'follow.html'", "form.save(commit=False) # comment.post = post # comment.author = request.user # comment.save() # return", "'following': following} def get_queryset(self): query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request,", "= super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request, username): '''Функция для", "PostForm(request.POST, files=request.FILES or None) # if form.is_valid(): # post = 
form.save(commit=False) # post.author", "follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') #", "# 'paginator': paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name", "form_valid(self, form): comment = form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий", "return redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model", "True return { 'profile': profile, 'following': following } def get_queryset(self): profile = get_object_or_404(", "'''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request): '''Страница 500''' return", "# post = form.save(commit=False) # post.author = request.user # post.save() # messages.add_message( #", "class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name = 'comments.html' form_class = CommentForm slug_field", "comment.author = request.user # comment.save() # return redirect('post', post_id=post_id, username=username) # return redirect('post',", "= Post template_name = 'profile.html' paginate_by = 5 context_object_name = 'post_list' @property def", "post = get_object_or_404( # Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) #", "False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True", "'following': following}) class PostView(ListView): model = Comment template_name = 'post_view.html' context_object_name = 'comments'", "post # comment.author = request.user # comment.save() 
# return redirect('post', post_id=post_id, username=username) #", "redirect('profile', username=username) @login_required def profile_unfollow(request, username): '''Функция для отписки от пользователя''' follover =", "# return render( # request, \"new_post.html\", {'form': form, 'title': title, 'post': post}) class", "edit_comment(request, username, post_id, comment_id): # '''Функция для редактирования комментария к публикации''' # title", "'POST': # form = PostForm(request.POST, files=request.FILES or None) # if form.is_valid(): # post", "if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return", "}) # @login_required # def post_delete(request, username, post_id): # '''Функция для удаления публикации'''", "query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def new_post(request): # '''Страница создания новой", "return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'post_delete.html'", "username, post_id): # '''Функция для добавления комментария к публикации''' # post = get_object_or_404(Post,", "messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username':", "def add_group(request): # '''Страница для добавления группы''' # title = 'Создать группу' #", "# form = PostForm(instance=post) # else: # return redirect('post', post_id=post.pk, username=post.author) # return", "'post_id': self.object.post.pk }) # @login_required # def follow_index(request): # '''Страница с публикациями избранных", "'title': 'Редактировать комментарий' } def form_valid(self, form): comment = form.save(commit=False) comment.created = timezone.now()", "def get_success_url(self): return reverse('index') # def post_view(request, post_id, 
username): # '''Страница отдельной публикации'''", "profile(request, username): # '''Страница с публикациями пользователя''' # user_profile = get_object_or_404( # User.objects.filter(", "django.utils import timezone from requests import request from .forms import CommentForm, GroupForm, PostForm", "# page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"follow.html\", {'page':", "# 'paginator': paginator}) class GroupPostView(ListView): model = Post template_name = 'group.html' paginate_by =", "{ 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return", "# '''Страница создания новой публикации''' # if request.method == 'POST': # form =", "else: # form = PostForm(instance=post) # else: # return redirect('post', post_id=post.pk, username=post.author) #", "request.method == 'POST': # form = CommentForm(request.POST) # if form.is_valid(): # comment =", "following}) class PostView(ListView): model = Comment template_name = 'post_view.html' context_object_name = 'comments' @property", "render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page, # 'paginator': paginator, # 'following': following})", "get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True))) #", "(ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model = Post template_name = 'index.html'", "# page_number = request.GET.get('page') # page = paginator.get_page(page_number) # following = False #", "# if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request, 'post_view.html',", "post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая 
запись добавлена' ) return super().form_valid(form)", "username=username) @login_required def profile_unfollow(request, username): '''Функция для отписки от пользователя''' follover = Follow.objects.filter(author__username=username,", "from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model = Post", "публикации''' # title = 'Редактировать комментарий' # comment = get_object_or_404(Comment, post=post_id, pk=comment_id) #", "django.contrib import messages from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator", "form, 'title': title}) # form = GroupForm() # return render(request, \"new_post.html\", {'form': form,", "get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author: # if request.method == \"POST\": #", "form_class = PostForm template_name = 'new_post.html' extra_context = { 'title': 'Создать новою запись'", "follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name = 'follow.html' paginate_by = 5", ") # return redirect('index') # else: # form = PostForm() # return render(request,", "5 context_object_name = 'post_list' @property def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) }", "Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post': post, 'profile': user_profile, 'form': CommentForm(), 'following':", "def get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request,", "form.is_valid(): # slug = form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug) # return", "return redirect('index') # else: # form = PostForm() # return render(request, 'new_post.html', 
{'form':", "комментария к публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.method == 'POST':", "= 'new_post.html' form_class = GroupForm extra_context = { 'title': 'Создать группу' } def", "# page = paginator.get_page(page_number) # following = False # if request.user.is_authenticated: # if", "following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True", "CommentForm(request.POST) # if form.is_valid(): # comment = form.save(commit=False) # comment.post = post #", "followed_author = get_object_or_404(User, username=username) if followed_author == request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user,", "= Comment template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context =", "following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post':", "paginator, # 'following': following}) class ProfileView(ListView): model = Post template_name = 'profile.html' paginate_by", "'post_id': self.object.post.pk }) # @login_required # def add_group(request): # '''Страница для добавления группы'''", "'group') # @login_required # def new_post(request): # '''Страница создания новой публикации''' # if", "user=request.user).exists(): # following = True # return render(request, 'post_view.html', {'post': post, # 'profile':", "'new_post.html' extra_context = { 'title': 'Редактировать запись' } def form_valid(self, form): post =", "'Редактировать запись' } def form_valid(self, form): post = form.save(commit=False) post.pub_date = timezone.now() post.save()", "return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request): '''Страница 500''' return render(request, \"misc/500.html\",", "# post = 
get_object_or_404(Post, pk=post_id) # if request.method == 'POST': # form =", "# username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( #", "paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group, # 'post_list': page, # 'paginator': paginator})", "удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def", "page_number = request.GET.get('page') # page = paginator.get_page(page_number) # following = False # if", "FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name = 'follow.html' paginate_by = 5 context_object_name =", "template_name = 'profile.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): profile", "context_object_name = 'post_list' @property def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True),", "'group.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): return { 'group':", "form_valid(self, form): post = form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая", "import messages from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import", "# 'following': following}) class PostView(ListView): model = Comment template_name = 'post_view.html' context_object_name =", "redirect, render from django.urls import reverse from django.utils import timezone from requests import", "# 'comments': post_comment, # 'form': form, # 'following': following}) class PostView(ListView): model =", "return redirect('post', post_id=post.pk, username=username) # else: # form = 
PostForm(instance=post) # else: #", "= 'group.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): return {", "обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, })", "= Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator", "post_list = Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() #", "get_object_or_404( # Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment =", "title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field = 'username' pk_url_kwarg", "get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list", "'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name = 'new_post.html' form_class =", "# @login_required # def add_comment(request, username, post_id): # '''Функция для добавления комментария к", "# 'form': form, # 'following': following}) class PostView(ListView): model = Comment template_name =", "comment.created = timezone.now() # comment.save() # return redirect('post', username=username, post_id=post_id) # form =", "= request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page, #", "# form = CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment = form.save(commit=False) #", "GroupForm, PostForm from .models import Comment, Follow, Group, Post, User from 
django.views.generic import", "post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following = False # if request.user.is_authenticated: #", "'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following =", "if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return { 'profile': profile, 'following':", "IndexListView(ListView): model = Post template_name = 'index.html' paginate_by = 5 context_object_name = 'post_list'", "or None) # if form.is_valid(): # post = form.save(commit=False) # post.author = request.user", "paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): profile = get_object_or_404( User.objects.filter(", "post_id, comment_id): '''Функция для удаления комментария к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id)", "= post comment.author = self.request.user comment.save() return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username':", "# else: # return redirect('post', post_id=post.pk, username=post.author) # return render( # request, \"new_post.html\",", "profile_follow(request, username): '''Функция для подписки на пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author", "= Post template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id' success_message =", "= request.user # comment.save() # return redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id,", "render(request, \"new_post.html\", {'form': form, 'title': title}) # form = GroupForm() # return render(request,", "user=request.user) return redirect('profile', username=username) @login_required def 
profile_unfollow(request, username): '''Функция для отписки от пользователя'''", "comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author: comment.delete() return redirect('post', username=username,", "user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404(", "# def group_posts(request, slug): # '''Страница с публикиями связанными с группой''' # group", "render(request, 'post_view.html', {'post': post, # 'profile': user_profile, # 'comments': post_comment, # 'form': form,", "GroupForm extra_context = { 'title': 'Создать группу' } def get_success_url(self, **kwargs): return reverse('group',", "DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model = Post template_name = 'index.html' paginate_by", "'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context = { 'title': 'Редактировать комментарий'", "@login_required # def follow_index(request): # '''Страница с публикациями избранных пользователей''' # follow_page =", "slug=slug) # return render(request, \"new_post.html\", {'form': form, 'title': title}) # form = GroupForm()", "django.urls import reverse from django.utils import timezone from requests import request from .forms", "kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def add_group(request): # '''Страница для", "= CommentForm(request.POST) # if form.is_valid(): # comment = form.save(commit=False) # comment.post = post", "follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related( # 'group',", "return redirect('post', post_id=post.pk, username=post.author) # return render( # request, 
\"new_post.html\", {'form': form, 'title':", "self.object.author, 'post_id': self.object.post.pk }) # @login_required # def add_group(request): # '''Страница для добавления", "form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form)", "= Group template_name = 'new_post.html' form_class = GroupForm extra_context = { 'title': 'Создать", "= Paginator(post_list, 10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # following", "import CommentForm, GroupForm, PostForm from .models import Comment, Follow, Group, Post, User from", "page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request): '''Страница", "template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id' success_message = 'Запись удалена'", "extra_context = { 'title': 'Создать новою запись' } def form_valid(self, form): post =", "extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post =", "пользователя''' # user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), #", "return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def add_group(request): #", "# def new_post(request): # '''Страница создания новой публикации''' # if request.method == 'POST':", "messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # ) # return redirect('index')", "'paginator': paginator, # 'follow_page': follow_page}) class 
FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name =", "instance=comment) # if form.is_valid(): # comment = form.save(commit=False) # comment.created = timezone.now() #", "Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment = Comment.objects.filter( #", "get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count(", "Post, User from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model", "'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form =", "'comment_id' extra_context = { 'title': 'Редактировать комментарий' } def form_valid(self, form): comment =", "= get_object_or_404(Post, pk=post_id) # if request.method == 'POST': # form = CommentForm(request.POST) #", "form = GroupForm(request.POST) # if form.is_valid(): # slug = form.cleaned_data['slug'] # form.save() #", "'''Страница отдельной публикации''' # user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), #", "запись' } def form_valid(self, form): post = form.save(commit=False) post.author = self.request.user post.save() messages.add_message(", "distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id'])", "= 'index.html' paginate_by = 5 context_object_name = 'post_list' extra_context = { 
'index_page': True", "CreateView): model = Comment template_name = 'comments.html' form_class = CommentForm slug_field = 'username'", "новой публикации''' # if request.method == 'POST': # form = PostForm(request.POST, files=request.FILES or", "form_class = CommentForm slug_field = 'username' pk_url_kwarg = 'post_id' def form_valid(self, form): post", "# return redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment) # return render(request, \"new_post.html\",", "exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request): '''Страница 500'''", "удаления комментария к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author:", "связанными с группой''' # group = get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( #", "django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator from django.db.models", "**kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def", "отредактирован', ) return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk })", "Post template_name = 'profile.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self):", "username=username) # return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name", "pk=comment_id) if request.user == comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required #", "ListView): model = Post template_name = 'follow.html' paginate_by = 5 context_object_name = 'post_list'", "публикации''' 
# user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True),", "django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model = Post template_name", "form.save() # return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form': form, 'title': title})", "== \"POST\": # form = PostForm(request.POST or None, # files=request.FILES or None, #", "self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def", "для удаления публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.user == post.author:", "= get_object_or_404(Post, pk=post_id) # if request.user == post.author: # post.delete() # return redirect('profile',", "get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related(", "class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class = PostForm template_name = 'new_post.html' extra_context", "10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"follow.html\",", "Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required def profile_unfollow(request,", "} def form_valid(self, form): post = form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request,", "# 'paginator': paginator, # 'following': following}) class ProfileView(ListView): model = Post template_name =", "группу' # if request.method 
== 'POST': # form = GroupForm(request.POST) # if form.is_valid():", "UpdateView): model = Post slug_field = 'username' pk_url_kwarg = 'post_id' form_class = PostForm", "= { 'index_page': True } # def group_posts(request, slug): # '''Страница с публикиями", "post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post comment.author = self.request.user", "get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def add_group(request):", "# 'following': following}) class ProfileView(ListView): model = Post template_name = 'profile.html' paginate_by =", "'''Функция для редактирования комментария к публикации''' # title = 'Редактировать комментарий' # comment", "def post_view(request, post_id, username): # '''Страница отдельной публикации''' # user_profile = get_object_or_404( #", "'post_view.html', {'post': post, # 'profile': user_profile, # 'comments': post_comment, # 'form': form, #", "class ProfileView(ListView): model = Post template_name = 'profile.html' paginate_by = 5 context_object_name =", "'''Функция для подписки на пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author == request.user:", "follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True))) # post = get_object_or_404( #", "post = form.save(commit=False) # post.pub_date = timezone.now() # post.save() # return redirect('post', post_id=post.pk,", "post_id, comment_id): # '''Функция для редактирования комментария к публикации''' # title = 'Редактировать", "для редактирования комментария к публикации''' # title = 'Редактировать комментарий' # comment =", "post=post_id, pk=comment_id) # if request.user == comment.author: # if request.method == 'POST': #", "# return render(request, \"new_post.html\", {'form': form, 'title': title}) class 
CommentEditView(LoginRequiredMixin, UpdateView): model =", "login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator from django.db.models import Count", "extra_context = { 'title': 'Редактировать запись' } def form_valid(self, form): post = form.save(commit=False)", "= request.user # post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success'", "'post_id' def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post =", "page, # 'paginator': paginator, # 'following': following}) class ProfileView(ListView): model = Post template_name", "extra_context = { 'follow_page': True } def get_queryset(self): query_set = super().get_queryset() return query_set.filter(", "return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name = 'comments.html'", "Comment template_name = 'post_view.html' context_object_name = 'comments' @property def extra_context(self): user_profile = get_object_or_404(", "import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator from django.db.models import", "# return render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page, # 'paginator': paginator, #", "} def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set", "= True # return render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page, # 'paginator':", "'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return query_set.filter(", "= 'Редактировать запись' # post = 
get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author:", "= 'follow.html' paginate_by = 5 context_object_name = 'post_list' extra_context = { 'follow_page': True", "reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path},", "paginator = Paginator(post_list, 10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) #", "redirect('post', post_id=post.pk, username=post.author) # return render( # request, \"new_post.html\", {'form': form, 'title': title,", "return redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form':", "following} def get_queryset(self): query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username):", "# return redirect('post', post_id=post.pk, username=post.author) # return render( # request, \"new_post.html\", {'form': form,", "super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required # def post_edit(request, username, post_id):", "Group template_name = 'new_post.html' form_class = GroupForm extra_context = { 'title': 'Создать группу'", "return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request, username): '''Функция для подписки на", "post = form.save(commit=False) # post.author = request.user # post.save() # messages.add_message( # request,", "comment.post = post # comment.author = request.user # comment.save() # return redirect('post', post_id=post_id,", "pk_url_kwarg = 'post_id' def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False)", "def get_queryset(self): query_set = 
super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): #", "# if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request, \"profile.html\",", "'''Страница редактирования публикации''' # title = 'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id)", "page, # 'paginator': paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model = Post", "'POST': # form = CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment = form.save(commit=False)", "reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def add_group(request): # '''Страница", "from django.contrib import messages from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from", "render(request, 'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class", "{'form': form, 'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field", "'profile.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): profile = get_object_or_404(", "form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post comment.author", "== comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required # def edit_comment(request, username,", "following } def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True)))", "= CommentForm pk_url_kwarg = 
'comment_id' extra_context = { 'title': 'Редактировать комментарий' } def", "class PostView(ListView): model = Comment template_name = 'post_view.html' context_object_name = 'comments' @property def", "# if form.is_valid(): # comment = form.save(commit=False) # comment.post = post # comment.author", "get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile,", "render(request, \"follow.html\", {'page': page, # 'paginator': paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView):", "user=request.user).exists(): # following = True # return render(request, \"profile.html\", {'profile': user_profile, # 'post_list':", "username, post_id, comment_id): # '''Функция для редактирования комментария к публикации''' # title =", "super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required #", "Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request, \"profile.html\", {'profile': user_profile,", "form): post = form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись", "@property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True)))", "# user=request.user).exists(): # following = True # return render(request, 'post_view.html', {'post': post, #", "user_profile, # 'post_list': page, # 'paginator': paginator, # 'following': following}) class ProfileView(ListView): model", 
"post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница с публикациями пользователя''' # user_profile =", "# post.pub_date = timezone.now() # post.save() # return redirect('post', post_id=post.pk, username=username) # else:", "= 'post_list' extra_context = { 'follow_page': True } def get_queryset(self): query_set = super().get_queryset()", "form, # 'following': following}) class PostView(ListView): model = Comment template_name = 'post_view.html' context_object_name", "get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related(", "{ 'title': 'Создать группу' } def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def", "'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field = 'username'", "post_comment, # 'form': form, # 'following': following}) class PostView(ListView): model = Comment template_name", "django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator from django.db.models import Count from django.shortcuts", "import LoginRequiredMixin from django.core.paginator import Paginator from django.db.models import Count from django.shortcuts import", "= form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' )", "CommentForm slug_field = 'username' pk_url_kwarg = 'post_id' def form_valid(self, form): post = get_object_or_404(Post,", "post = form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена'", "# def profile(request, username): # '''Страница с публикациями 
пользователя''' # user_profile = get_object_or_404(", "query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request, username): '''Функция для подписки на пользователя'''", "= PostForm(request.POST or None, # files=request.FILES or None, # instance=post) # if form.is_valid():", "Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator =", "Post template_name = 'group.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self):", "following = True # return render(request, 'post_view.html', {'post': post, # 'profile': user_profile, #", "request.user # post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' #", "PostView(ListView): model = Comment template_name = 'post_view.html' context_object_name = 'comments' @property def extra_context(self):", "post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author: # if request.method ==", "'post_id' form_class = PostForm template_name = 'new_post.html' extra_context = { 'title': 'Редактировать запись'", "request.method == 'POST': # form = GroupForm(request.POST) # if form.is_valid(): # slug =", "# if request.user == post.author: # post.delete() # return redirect('profile', username=username) # return", "from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator from django.db.models import Count from", "группой''' # group = get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( # group=group).select_related( #", "= Comment template_name = 'post_view.html' context_object_name = 'comments' @property def extra_context(self): user_profile =", "def profile_unfollow(request, username): '''Функция для отписки от пользователя''' follover = 
Follow.objects.filter(author__username=username, user=request.user) follover.delete()", "title = 'Создать группу' # if request.method == 'POST': # form = GroupForm(request.POST)", "'post_list': page, # 'paginator': paginator}) class GroupPostView(ListView): model = Post template_name = 'group.html'", "'index_page': True } # def group_posts(request, slug): # '''Страница с публикиями связанными с", "form = CommentForm() # following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile,", "# '''Страница редактирования публикации''' # title = 'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'),", "reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def post_delete(request, username, post_id): #", "создания новой публикации''' # if request.method == 'POST': # form = PostForm(request.POST, files=request.FILES", "= timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def get_success_url(self):", "context_object_name = 'post_list' extra_context = { 'index_page': True } # def group_posts(request, slug):", "к публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.method == 'POST': #", "запись добавлена', extra_tags='success' # ) # return redirect('index') # else: # form =", "post_delete(request, username, post_id): # '''Функция для удаления публикации''' # post = get_object_or_404(Post, pk=post_id)", "return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form': form, 'title': title}) # form", "= 'post_list' extra_context = { 'index_page': True } # def group_posts(request, slug): #", "query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница с публикациями пользователя''' # user_profile", "kwargs={ 'username': self.object.author, }) # @login_required # def 
add_comment(request, username, post_id): # '''Функция", "= 5 context_object_name = 'post_list' @property def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug'])", "form): post = form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!',", "= get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) #", "@login_required def profile_unfollow(request, username): '''Функция для отписки от пользователя''' follover = Follow.objects.filter(author__username=username, user=request.user)", "# def edit_comment(request, username, post_id, comment_id): # '''Функция для редактирования комментария к публикации'''", "= CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView):", "= 5 context_object_name = 'post_list' @property def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate(", "username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists():", "# if form.is_valid(): # slug = form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug)", "slug = form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\",", "if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request, \"profile.html\", {'profile':", "= PostForm template_name = 'new_post.html' extra_context = { 'title': 'Редактировать запись' } def", "if request.user == 
post.author: # if request.method == \"POST\": # form = PostForm(request.POST", "= paginator.get_page(page_number) # following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, #", "form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class = PostForm template_name", "import get_object_or_404, redirect, render from django.urls import reverse from django.utils import timezone from", "form): comment = form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован',", "if form.is_valid(): # comment = form.save(commit=False) # comment.created = timezone.now() # comment.save() #", "post.pub_date = timezone.now() # post.save() # return redirect('post', post_id=post.pk, username=username) # else: #", "render( # request, \"new_post.html\", {'form': form, 'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView):", "= form.save(commit=False) # comment.created = timezone.now() # comment.save() # return redirect('post', username=username, post_id=post_id)", "post_id, username): # '''Страница отдельной публикации''' # user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( #", "'''Функция для удаления публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.user ==", "# post.author = request.user # post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая запись", "redirect('profile', username=username) @login_required def delete_comment(request, username, post_id, comment_id): '''Функция для удаления комментария к", "if request.method == 'POST': # form = PostForm(request.POST, files=request.FILES or None) # if", "get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def add_comment(request, username,", "= self.request.user comment.save() return 
super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk", "'title': title}) # form = GroupForm() # return render(request, \"new_post.html\", {'form': form, 'title':", "comment.save() # return redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment) # return render(request,", "form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name = 'new_post.html' form_class", "# form = CommentForm() # following = False # if request.user.is_authenticated: # if", "following = True # return render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page, #", "'following': following}) class ProfileView(ListView): model = Post template_name = 'profile.html' paginate_by = 5", "'group'), # pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm()", "username, post_id): # '''Страница редактирования публикации''' # title = 'Редактировать запись' # post", "# def follow_index(request): # '''Страница с публикациями избранных пользователей''' # follow_page = True", "для отписки от пользователя''' follover = Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required", "group, # 'post_list': page, # 'paginator': paginator}) class GroupPostView(ListView): model = Post template_name", "username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required", "'post_id' success_message = 'Запись удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, })", "пользователя''' follover = 
Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request, username,", "CreateView): model = Group template_name = 'new_post.html' form_class = GroupForm extra_context = {", "model = Post template_name = 'index.html' paginate_by = 5 context_object_name = 'post_list' extra_context", "reverse from django.utils import timezone from requests import request from .forms import CommentForm,", "def post_edit(request, username, post_id): # '''Страница редактирования публикации''' # title = 'Редактировать запись'", "post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'post_delete.html' slug_field =", "'Создать группу' # if request.method == 'POST': # form = GroupForm(request.POST) # if", "@login_required # def edit_comment(request, username, post_id, comment_id): # '''Функция для редактирования комментария к", "= get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( #", "return redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile',", "# if request.method == 'POST': # form = CommentForm(request.POST, instance=comment) # if form.is_valid():", ") return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) #", "404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request): '''Страница 500''' return render(request,", "return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def add_comment(request, username, post_id):", "= get_object_or_404( # 
Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment", "= CommentForm() # following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, #", "== 'POST': # form = CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment =", "'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field = 'username' pk_url_kwarg =", "UpdateView): model = Comment template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id'", "model = Post slug_field = 'username' pk_url_kwarg = 'post_id' form_class = PostForm template_name", "user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self): query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author')", "comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def get_success_url(self): return reverse('post',", "= Post template_name = 'group.html' paginate_by = 5 context_object_name = 'post_list' @property def", "username=username, post_id=post_id) # @login_required # def edit_comment(request, username, post_id, comment_id): # '''Функция для", "post.author = request.user # post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена',", "redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'post_delete.html' slug_field", "model = Comment template_name = 'comments.html' form_class = CommentForm slug_field = 'username' pk_url_kwarg", "True return {'post': post, 'profile': user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self): query_set", "import request from .forms import CommentForm, GroupForm, PostForm from .models import Comment, Follow,", "'username': 
self.object.author, }) # @login_required # def add_comment(request, username, post_id): # '''Функция для", "# comment = form.save(commit=False) # comment.created = timezone.now() # comment.save() # return redirect('post',", "= timezone.now() # post.save() # return redirect('post', post_id=post.pk, username=username) # else: # form", "username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author')", "follover = Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request, username, post_id,", "# title = 'Создать группу' # if request.method == 'POST': # form =", "UpdateView, DeleteView) class IndexListView(ListView): model = Post template_name = 'index.html' paginate_by = 5", "# return render(request, \"follow.html\", {'page': page, # 'paginator': paginator, # 'follow_page': follow_page}) class", "request.method == 'POST': # form = CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment", "'''Страница с публикиями связанными с группой''' # group = get_object_or_404(Group, slug=slug) # post_list", "User from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model =", "Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request, username, post_id, comment_id): '''Функция", "post = get_object_or_404(Post, pk=post_id) # if request.user == post.author: # post.delete() # return", "from .models import Comment, Follow, Group, Post, User from django.views.generic import (ListView, DetailView,", "# author=user_profile).select_related( # 'group', 'author').annotate( # 
comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list,", "CreateView, UpdateView, DeleteView) class IndexListView(ListView): model = Post template_name = 'index.html' paginate_by =", "# @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class = PostForm template_name =", "'''Функция для отписки от пользователя''' follover = Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username)", "kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404)", "CommentForm() # following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists():", "comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following", "# @login_required # def post_delete(request, username, post_id): # '''Функция для удаления публикации''' #", "'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class =", "username=username, post_id=post_id) # form = CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': form, 'title':", "\"new_post.html\", {'form': form, 'title': title}) # form = GroupForm() # return render(request, \"new_post.html\",", "группу' } def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница", "'profile': user_profile, # 'comments': post_comment, # 'form': form, # 'following': following}) class PostView(ListView):", "'paginator': paginator}) class GroupPostView(ListView): model = 
Post template_name = 'group.html' paginate_by = 5", "\"new_post.html\", {'form': form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name =", "template_name = 'follow.html' paginate_by = 5 context_object_name = 'post_list' extra_context = { 'follow_page':", "return {'post': post, 'profile': user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self): query_set =", "kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def follow_index(request): # '''Страница с", "= 5 context_object_name = 'post_list' extra_context = { 'index_page': True } # def", "get_success_url(self): return reverse('index') # def post_view(request, post_id, username): # '''Страница отдельной публикации''' #", "group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10)", "return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\":", "# comment.save() # return redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username) class", "= CommentForm slug_field = 'username' pk_url_kwarg = 'post_id' def form_valid(self, form): post =", "following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following", "if request.method == \"POST\": # form = PostForm(request.POST or None, # files=request.FILES or", "'POST': # form = GroupForm(request.POST) # if form.is_valid(): # slug = form.cleaned_data['slug'] #", "'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = 
Paginator(post_list, 10) # page_number", "User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group',", "# 'profile': user_profile, # 'comments': post_comment, # 'form': form, # 'following': following}) class", "def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def add_comment(request,", "# comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all()", "# page = paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page, # 'paginator': paginator,", "'index.html' paginate_by = 5 context_object_name = 'post_list' extra_context = { 'index_page': True }", "# return render(request, 'post_view.html', {'post': post, # 'profile': user_profile, # 'comments': post_comment, #", "с публикациями пользователя''' # user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate( # follower_count=Count('follower',", "return redirect('post', username=username, post_id=post_id) # @login_required # def edit_comment(request, username, post_id, comment_id): #", "def profile_follow(request, username): '''Функция для подписки на пользователя''' followed_author = get_object_or_404(User, username=username) if", "# @login_required # def add_group(request): # '''Страница для добавления группы''' # title =", "# return render(request, \"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model =", "def page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\", {\"path\": request.path}, status=404) def server_error(request):", "def 
group_posts(request, slug): # '''Страница с публикиями связанными с группой''' # group =", "# def post_view(request, post_id, username): # '''Страница отдельной публикации''' # user_profile = get_object_or_404(", "# @login_required # def post_edit(request, username, post_id): # '''Страница редактирования публикации''' # title", "запись добавлена' ) return super().form_valid(form) def get_success_url(self): return reverse('index') # def post_view(request, post_id,", "class FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name = 'follow.html' paginate_by = 5 context_object_name", "'profile': profile, 'following': following } def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower',", "username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related(", "редактирования публикации''' # title = 'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) #", "# @login_required # def new_post(request): # '''Страница создания новой публикации''' # if request.method", "model = Post template_name = 'group.html' paginate_by = 5 context_object_name = 'post_list' @property", "context_object_name = 'post_list' extra_context = { 'follow_page': True } def get_queryset(self): query_set =", "'post_view.html' context_object_name = 'comments' @property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True),", "= form.save(commit=False) comment.post = post comment.author = self.request.user comment.save() return super().form_valid(form) def get_success_url(self):", "# user=request.user).exists(): # following = True # return render(request, \"profile.html\", {'profile': user_profile, 
#", "def get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') #", "= get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True)))", "= super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def new_post(request): #", "для добавления группы''' # title = 'Создать группу' # if request.method == 'POST':", "\"new_post.html\", {'form': form, 'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post", "редактирования комментария к публикации''' # title = 'Редактировать комментарий' # comment = get_object_or_404(Comment,", "# following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True))) # post = get_object_or_404( # Post.objects.annotate( #", "новою запись' } def form_valid(self, form): post = form.save(commit=False) post.author = self.request.user post.save()", "# User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list =", "= { 'title': 'Создать группу' } def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug})", "= request.GET.get('page') # page = paginator.get_page(page_number) # following = False # if request.user.is_authenticated:", "def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post", "= 'profile.html' paginate_by = 5 context_object_name = 'post_list' @property def extra_context(self): profile =", "reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # 
@login_required # def follow_index(request): # '''Страница", "= get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author: # if request.method == \"POST\":", "profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return", "comment.save() # return redirect('post', post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin,", "группы''' # title = 'Создать группу' # if request.method == 'POST': # form", "get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request, username):", "username): # '''Страница отдельной публикации''' # user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower',", "Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following = False # if", "comment.author: # if request.method == 'POST': # form = CommentForm(request.POST, instance=comment) # if", "= Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request, username, post_id, comment_id):", "self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена' ) return super().form_valid(form) def get_success_url(self):", "post, 'profile': user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self): query_set = super().get_queryset() return", "'username' pk_url_kwarg = 'post_id' success_message = 'Запись удалена' def get_success_url(self): return 
reverse('profile', kwargs={", "title}) class GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name = 'new_post.html' form_class = GroupForm", "following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return {", "django.core.paginator import Paginator from django.db.models import Count from django.shortcuts import get_object_or_404, redirect, render", "from requests import request from .forms import CommentForm, GroupForm, PostForm from .models import", "# group = get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( # group=group).select_related( # 'author',", "# def add_comment(request, username, post_id): # '''Функция для добавления комментария к публикации''' #", "от пользователя''' follover = Follow.objects.filter(author__username=username, user=request.user) follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request,", "{'form': form, 'title': title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name = 'new_post.html'", "template_name = 'comments.html' form_class = CommentForm slug_field = 'username' pk_url_kwarg = 'post_id' def", "user=self.request.user).exists(): following = True return { 'profile': profile, 'following': following } def get_queryset(self):", "import timezone from requests import request from .forms import CommentForm, GroupForm, PostForm from", "= Post template_name = 'index.html' paginate_by = 5 context_object_name = 'post_list' extra_context =", "Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request, 'post_view.html', {'post': post,", "# post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following = False # if request.user.is_authenticated:", "# 'post_list': page, # 'paginator': paginator, # 'following': following}) class 
ProfileView(ListView): model =", "PostForm template_name = 'new_post.html' extra_context = { 'title': 'Создать новою запись' } def", "or None, # instance=post) # if form.is_valid(): # post = form.save(commit=False) # post.pub_date", "self.object.post.pk }) # @login_required # def follow_index(request): # '''Страница с публикациями избранных пользователей'''", "'post_list': page, # 'paginator': paginator, # 'following': following}) class ProfileView(ListView): model = Post", "= 'post_id' form_class = PostForm template_name = 'new_post.html' extra_context = { 'title': 'Редактировать", "redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment) # return render(request, \"new_post.html\", {'form': form,", "отдельной публикации''' # user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following',", "redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form': form, 'title': title}) # form =", "= paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page, # 'paginator': paginator, # 'follow_page':", "request.user == post.author: # post.delete() # return redirect('profile', username=username) # return redirect('post', post_id=post.pk,", "# group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list,", "request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page, # 'paginator':", "'paginator': paginator, # 'following': following}) class ProfileView(ListView): model = Post template_name = 'profile.html'", "'following': following } def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), 
following_count=Count('following',", "self.object.post.pk }) # @login_required # def add_group(request): # '''Страница для добавления группы''' #", "post.author: # post.delete() # return redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author) class", "# post = get_object_or_404( # Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id)", "= 'post_list' @property def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following',", "model = Comment template_name = 'post_view.html' context_object_name = 'comments' @property def extra_context(self): user_profile", "# title = 'Редактировать комментарий' # comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if", "{'post': post, 'profile': user_profile, 'form': CommentForm(), 'following': following} def get_queryset(self): query_set = super().get_queryset()", "distinct=True))) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return", "template_name = 'new_post.html' extra_context = { 'title': 'Редактировать запись' } def form_valid(self, form):", "к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author: comment.delete() return", "self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post': post, 'profile': user_profile, 'form':", "username): '''Функция для подписки на пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author ==", "if form.is_valid(): # slug = form.cleaned_data['slug'] # form.save() # return redirect(\"group\", slug=slug) #", "def extra_context(self): user_profile = 
get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post", "extra_tags='success' # ) # return redirect('index') # else: # form = PostForm() #", "profile_unfollow(request, username): '''Функция для отписки от пользователя''' follover = Follow.objects.filter(author__username=username, user=request.user) follover.delete() return", "= Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator", "if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request, 'post_view.html', {'post':", "= timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def", "return render(request, \"new_post.html\", {'form': form, 'title': title}) # form = GroupForm() # return", "distinct=True), # following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate(", "# page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"group.html\", {'group':", "= GroupForm extra_context = { 'title': 'Создать группу' } def get_success_url(self, **kwargs): return", "def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return", "pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following", "username=post.author) # return render( # request, \"new_post.html\", {'form': form, 'title': title, 'post': post})", "} def 
form_valid(self, form): comment = form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request,", "title = 'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user ==", "group_posts(request, slug): # '''Страница с публикиями связанными с группой''' # group = get_object_or_404(Group,", "request.method == 'POST': # form = PostForm(request.POST, files=request.FILES or None) # if form.is_valid():", "slug): # '''Страница с публикиями связанными с группой''' # group = get_object_or_404(Group, slug=slug)", "pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return", "# '''Страница с публикациями пользователя''' # user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate(", "if request.user == post.author: # post.delete() # return redirect('profile', username=username) # return redirect('post',", "comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required # def edit_comment(request, username, post_id, comment_id):", "= 'Создать группу' # if request.method == 'POST': # form = GroupForm(request.POST) #", "'POST': # form = CommentForm(request.POST) # if form.is_valid(): # comment = form.save(commit=False) #", "# follow_page = True # post_list = Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate(", "**kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request, exception): '''Страница 404''' return render(request, \"misc/404.html\",", "pk_url_kwarg = 'comment_id' extra_context = { 'title': 'Редактировать комментарий' } def form_valid(self, form):", "new_post(request): # '''Страница создания новой публикации''' # if request.method == 'POST': # form", "# Post.objects.annotate( # 
comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment = Comment.objects.filter(", "group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def new_post(request): # '''Страница создания новой публикации'''", "@login_required def profile_follow(request, username): '''Функция для подписки на пользователя''' followed_author = get_object_or_404(User, username=username)", "from django.db.models import Count from django.shortcuts import get_object_or_404, redirect, render from django.urls import", "= PostForm() # return render(request, 'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView):", "= form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message( self.request, messages.SUCCESS, f'Коментарий отредактирован', ) return", "slug_field = 'username' pk_url_kwarg = 'post_id' form_class = PostForm template_name = 'new_post.html' extra_context", "f'Новая запись добавлена', extra_tags='success' # ) # return redirect('index') # else: # form", "post_count=Count('post_author', distinct=True))) # post = get_object_or_404( # Post.objects.annotate( # comment_count=Count( # 'commented_post')).select_related('author', 'group'),", "# messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # ) # return", "if form.is_valid(): # post = form.save(commit=False) # post.pub_date = timezone.now() # post.save() #", "= 'username' pk_url_kwarg = 'post_id' def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment", "'author') # @login_required # def post_edit(request, username, post_id): # '''Страница редактирования публикации''' #", "slug_field = 'username' pk_url_kwarg = 'post_id' def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id'])", "following_count=Count('following', distinct=True), # 
post_count=Count('post_author', distinct=True))) # post = get_object_or_404( # Post.objects.annotate( # comment_count=Count(", "def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def", "False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return { 'profile': profile,", "'''Страница с публикациями пользователя''' # user_profile = get_object_or_404( # User.objects.filter( # username=username).annotate( #", "comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number = request.GET.get('page') #", "query_set.filter( author=profile).select_related( 'group', 'author') # @login_required # def post_edit(request, username, post_id): # '''Страница", "return render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page, # 'paginator': paginator, # 'following':", "get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required", "'form': CommentForm(), 'following': following} def get_queryset(self): query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') #", "{'profile': user_profile, # 'post_list': page, # 'paginator': paginator, # 'following': following}) class ProfileView(ListView):", "form, 'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field =", "post_id): # '''Функция для добавления комментария к публикации''' # post = get_object_or_404(Post, pk=post_id)", "= 'post_id' def form_valid(self, form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post", "return render(request, \"group.html\", {'group': group, # 'post_list': page, # 
'paginator': paginator}) class GroupPostView(ListView):", "= True # post_list = Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count(", "'post_list' extra_context = { 'index_page': True } # def group_posts(request, slug): # '''Страница", "# author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list,", "form = CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment = form.save(commit=False) # comment.created", "# if form.is_valid(): # post = form.save(commit=False) # post.pub_date = timezone.now() # post.save()", "# form = PostForm() # return render(request, 'new_post.html', {'form': form}) # @login_required class", "post_list = Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() #", "comment = form.save(commit=False) # comment.post = post # comment.author = request.user # comment.save()", "return render(request, \"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model = Group", ".models import Comment, Follow, Group, Post, User from django.views.generic import (ListView, DetailView, CreateView,", "== 'POST': # form = PostForm(request.POST, files=request.FILES or None) # if form.is_valid(): #", "form = PostForm(request.POST, files=request.FILES or None) # if form.is_valid(): # post = form.save(commit=False)", "'''Функция для добавления комментария к публикации''' # post = get_object_or_404(Post, pk=post_id) # if", "pk=comment_id) # if request.user == comment.author: # if request.method == 'POST': # form", "= 'new_post.html' extra_context = { 'title': 'Создать новою запись' } def form_valid(self, form):", "\"profile.html\", {'profile': user_profile, # 'post_list': page, # 'paginator': 
paginator, # 'following': following}) class", "= PostForm(request.POST, files=request.FILES or None) # if form.is_valid(): # post = form.save(commit=False) #", "PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field = 'username' pk_url_kwarg = 'post_id' form_class =", "def follow_index(request): # '''Страница с публикациями избранных пользователей''' # follow_page = True #", "Post template_name = 'follow.html' paginate_by = 5 context_object_name = 'post_list' extra_context = {", "5 context_object_name = 'post_list' extra_context = { 'index_page': True } # def group_posts(request,", "f'Новая запись добавлена' ) return super().form_valid(form) def get_success_url(self): return reverse('index') # def post_view(request,", "post_view(request, post_id, username): # '''Страница отдельной публикации''' # user_profile = get_object_or_404( # User.objects.filter(username=username).annotate(", "return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required", "follow_index(request): # '''Страница с публикациями избранных пользователей''' # follow_page = True # post_list", "# 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name = 'follow.html' paginate_by", "user=self.request.user).exists(): following = True return {'post': post, 'profile': user_profile, 'form': CommentForm(), 'following': following}", "= { 'title': 'Редактировать комментарий' } def form_valid(self, form): comment = form.save(commit=False) comment.created", "# form = GroupForm() # return render(request, \"new_post.html\", {'form': form, 'title': title}) class", "post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False", "# if form.is_valid(): # comment = 
form.save(commit=False) # comment.created = timezone.now() # comment.save()", "render(request, \"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name", "comment_id): # '''Функция для редактирования комментария к публикации''' # title = 'Редактировать комментарий'", "profile, 'following': following } def get_queryset(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True),", "с публикациями избранных пользователей''' # follow_page = True # post_list = Post.objects.filter( #", "following = True return {'post': post, 'profile': user_profile, 'form': CommentForm(), 'following': following} def", "return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def follow_index(request): #", "Follow, Group, Post, User from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView) class", "Post form_class = PostForm template_name = 'new_post.html' extra_context = { 'title': 'Создать новою", "from django.urls import reverse from django.utils import timezone from requests import request from", "comment.author = self.request.user comment.save() return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id':", "= get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter(", "# else: # form = PostForm(instance=post) # else: # return redirect('post', post_id=post.pk, username=post.author)", "User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', 
distinct=True))) # post =", "redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name = 'comments.html' form_class", "extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set =", "follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True), post_count=Count('post_author', distinct=True))) post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'),", "избранных пользователей''' # follow_page = True # post_list = Post.objects.filter( # author__following__user=request.user).select_related( #", "import Comment, Follow, Group, Post, User from django.views.generic import (ListView, DetailView, CreateView, UpdateView,", "для подписки на пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author == request.user: return", "form.save(commit=False) post.author = self.request.user post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена' ) return", "'''Страница для добавления группы''' # title = 'Создать группу' # if request.method ==", "# form = CommentForm(request.POST) # if form.is_valid(): # comment = form.save(commit=False) # comment.post", "following = True return { 'profile': profile, 'following': following } def get_queryset(self): profile", "# return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form': form, 'title': title}) #", "PostDeleteView(LoginRequiredMixin, DeleteView): model = Post template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg =", "'group', 'author') @login_required def profile_follow(request, username): '''Функция для подписки на пользователя''' followed_author =", "{'post': post, # 'profile': user_profile, # 'comments': post_comment, # 'form': form, # 'following':", 
"post_id=post_id, username=username) # return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment", "NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class = PostForm template_name = 'new_post.html' extra_context =", "Post template_name = 'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id' success_message = 'Запись", "Paginator(post_list, 10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request,", "{'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post form_class = PostForm", "page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page,", "extra_context = { 'title': 'Создать группу' } def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug':", "title}) # form = GroupForm() # return render(request, \"new_post.html\", {'form': form, 'title': title})", "# form = PostForm(request.POST, files=request.FILES or None) # if form.is_valid(): # post =", "# User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), # post_count=Count('post_author', distinct=True))) # post", "distinct=True), following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=profile, user=self.request.user).exists(): following =", "# post_list = Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all()", "@login_required # def post_edit(request, username, post_id): # '''Страница редактирования публикации''' # title =", "request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user, 
author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return", "slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author',", "\"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name =", "messages.SUCCESS, f'Коментарий отредактирован', ) return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id':", "post_list = Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() #", "== post.author: # if request.method == \"POST\": # form = PostForm(request.POST or None,", "post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form)", "pk_url_kwarg = 'post_id' success_message = 'Запись удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username':", "paginator.get_page(page_number) # following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists():", "query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница с", "'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number =", "post.save() # messages.add_message( # request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # ) #", "request.GET.get('page') # page = paginator.get_page(page_number) # following = False # if request.user.is_authenticated: #", "post}) 
class PostEditUpdateView(LoginRequiredMixin, UpdateView): model = Post slug_field = 'username' pk_url_kwarg = 'post_id'", "= 5 context_object_name = 'post_list' extra_context = { 'follow_page': True } def get_queryset(self):", "комментария к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if request.user == comment.author: comment.delete()", "# form.save() # return redirect(\"group\", slug=slug) # return render(request, \"new_post.html\", {'form': form, 'title':", "if request.method == 'POST': # form = GroupForm(request.POST) # if form.is_valid(): # slug", "super().form_valid(form) def get_success_url(self): return reverse('index') # def post_view(request, post_id, username): # '''Страница отдельной", "from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse from django.utils import", "redirect('post', post_id=post.pk, username=username) # else: # form = PostForm(instance=post) # else: # return", "get_queryset(self): query_set = super().get_queryset() return query_set.filter( post=self.kwargs['post_id']).select_related('author') # def profile(request, username): # '''Страница", "request.user == comment.author: # if request.method == 'POST': # form = CommentForm(request.POST, instance=comment)", "{'group': group, # 'post_list': page, # 'paginator': paginator}) class GroupPostView(ListView): model = Post", "добавления группы''' # title = 'Создать группу' # if request.method == 'POST': #", "PostForm() # return render(request, 'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model", "if request.method == 'POST': # form = CommentForm(request.POST) # if form.is_valid(): # comment", "get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user == comment.author: # if request.method == 'POST':", "query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') 
@login_required def profile_follow(request, username): '''Функция", "model = Post form_class = PostForm template_name = 'new_post.html' extra_context = { 'title':", "redirect('post', username=username, post_id=post_id) # @login_required # def edit_comment(request, username, post_id, comment_id): # '''Функция", "пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author == request.user: return redirect('profile', username=username) if", "comment.save() return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) #", "== post.author: # post.delete() # return redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author)", "= post # comment.author = request.user # comment.save() # return redirect('post', post_id=post_id, username=username)", "author=user_profile).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10)", "comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required # def edit_comment(request, username, post_id,", "timezone from requests import request from .forms import CommentForm, GroupForm, PostForm from .models", "def edit_comment(request, username, post_id, comment_id): # '''Функция для редактирования комментария к публикации''' #", "10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"group.html\",", "def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs): query_set", "= request.GET.get('page') # page = paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group, #", "# post_list = Post.objects.filter( # author=user_profile).select_related( # 'group', 
'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all()", "request.user == post.author: # if request.method == \"POST\": # form = PostForm(request.POST or", "подписки на пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author == request.user: return redirect('profile',", "'comments': post_comment, # 'form': form, # 'following': following}) class PostView(ListView): model = Comment", "query_set = super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def new_post(request):", "pk_url_kwarg = 'post_id' form_class = PostForm template_name = 'new_post.html' extra_context = { 'title':", "model = Group template_name = 'new_post.html' form_class = GroupForm extra_context = { 'title':", "'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model = Post template_name = 'follow.html' paginate_by =", "super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request, username): '''Функция для подписки", "extra_context = { 'index_page': True } # def group_posts(request, slug): # '''Страница с", "= 'username' pk_url_kwarg = 'post_id' success_message = 'Запись удалена' def get_success_url(self): return reverse('profile',", "if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post': post, 'profile': user_profile,", "# files=request.FILES or None, # instance=post) # if form.is_valid(): # post = form.save(commit=False)", "None) # if form.is_valid(): # post = form.save(commit=False) # post.author = request.user #", "messages.add_message( self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return reverse('profile',", 
"comment_id): '''Функция для удаления комментария к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if", "distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required # def", "messages.SUCCESS, f'Новая запись добавлена' ) return super().form_valid(form) def get_success_url(self): return reverse('index') # def", "distinct=True), following_count=Count('following', distinct=True))) query_set = super().get_queryset() return query_set.filter( author=profile).select_related( 'group', 'author') # @login_required", "comment.post = post comment.author = self.request.user comment.save() return super().form_valid(form) def get_success_url(self): return reverse('post',", "= CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment = form.save(commit=False) # comment.created =", "comment = form.save(commit=False) # comment.created = timezone.now() # comment.save() # return redirect('post', username=username,", "# request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success' # ) # return redirect('index') #", "{ 'title': 'Редактировать комментарий' } def form_valid(self, form): comment = form.save(commit=False) comment.created =", "{ 'index_page': True } # def group_posts(request, slug): # '''Страница с публикиями связанными", "= PostForm(instance=post) # else: # return redirect('post', post_id=post.pk, username=post.author) # return render( #", "super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def", "# title = 'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user", "# post.delete() # return redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin,", "{ 'follow_page': True } def 
get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group',", "return super().form_valid(form) def get_success_url(self): return reverse('index') # def post_view(request, post_id, username): # '''Страница", "return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def post_delete(request, username, post_id):", "'form': form, # 'following': following}) class PostView(ListView): model = Comment template_name = 'post_view.html'", "post = get_object_or_404(Post, pk=post_id) # if request.method == 'POST': # form = CommentForm(request.POST)", "'author') @login_required def profile_follow(request, username): '''Функция для подписки на пользователя''' followed_author = get_object_or_404(User,", "комментарий' } def form_valid(self, form): comment = form.save(commit=False) comment.created = timezone.now() comment.save() messages.add_message(", "following = False # if request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following", "add_comment(request, username, post_id): # '''Функция для добавления комментария к публикации''' # post =", "Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists():", "return render(request, \"follow.html\", {'page': page, # 'paginator': paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin,", "def add_comment(request, username, post_id): # '''Функция для добавления комментария к публикации''' # post", "form.is_valid(): # post = form.save(commit=False) # post.author = request.user # post.save() # messages.add_message(", "}) # @login_required # def add_comment(request, username, post_id): # '''Функция для добавления комментария", 
"super().get_queryset() return query_set.filter( group__slug=self.kwargs['slug']).select_related( 'author', 'group') # @login_required # def new_post(request): # '''Страница", "redirect('index') # else: # form = PostForm() # return render(request, 'new_post.html', {'form': form})", "username): # '''Страница с публикациями пользователя''' # user_profile = get_object_or_404( # User.objects.filter( #", "post_id=post.pk, username=username) # else: # form = PostForm(instance=post) # else: # return redirect('post',", "DeleteView) class IndexListView(ListView): model = Post template_name = 'index.html' paginate_by = 5 context_object_name", "5 context_object_name = 'post_list' extra_context = { 'follow_page': True } def get_queryset(self): query_set", "CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg =", "= form.save(commit=False) # post.pub_date = timezone.now() # post.save() # return redirect('post', post_id=post.pk, username=username)", "timezone.now() # post.save() # return redirect('post', post_id=post.pk, username=username) # else: # form =", "# comment = form.save(commit=False) # comment.post = post # comment.author = request.user #", "def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def post_delete(request,", "= Post form_class = PostForm template_name = 'new_post.html' extra_context = { 'title': 'Создать", "# if request.user == post.author: # if request.method == \"POST\": # form =", "f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author,", "success_message = 'Запись удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) #", "'''Функция для удаления комментария к публикации''' comment = get_object_or_404(Comment, post=post_id, pk=comment_id) if 
request.user", "GroupForm() # return render(request, \"new_post.html\", {'form': form, 'title': title}) class GroupAddView(LoginRequiredMixin, CreateView): model", "get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False if self.request.user.is_authenticated: if", "'Редактировать комментарий' } def form_valid(self, form): comment = form.save(commit=False) comment.created = timezone.now() comment.save()", "def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following =", "if form.is_valid(): # post = form.save(commit=False) # post.author = request.user # post.save() #", "extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following = False", "get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk }) # @login_required # def follow_index(request):", "'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number =", "page = paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page, # 'paginator': paginator, #", "# following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related( # 'group', 'author').annotate( #", "followed_author == request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author,", "= 
Paginator(post_list, 10) # page_number = request.GET.get('page') # page = paginator.get_page(page_number) # return", "paginator.get_page(page_number) # return render(request, \"follow.html\", {'page': page, # 'paginator': paginator, # 'follow_page': follow_page})", "request.method == \"POST\": # form = PostForm(request.POST or None, # files=request.FILES or None,", "kwargs={ 'username': self.object.author, }) # @login_required # def post_delete(request, username, post_id): # '''Функция", "title}) class CommentEditView(LoginRequiredMixin, UpdateView): model = Comment template_name = 'new_post.html' form_class = CommentForm", "'post_delete.html' slug_field = 'username' pk_url_kwarg = 'post_id' success_message = 'Запись удалена' def get_success_url(self):", "def post_delete(request, username, post_id): # '''Функция для удаления публикации''' # post = get_object_or_404(Post,", "import Count from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse from", "True # return render(request, 'post_view.html', {'post': post, # 'profile': user_profile, # 'comments': post_comment,", "form_class = PostForm template_name = 'new_post.html' extra_context = { 'title': 'Редактировать запись' }", "CommentForm, GroupForm, PostForm from .models import Comment, Follow, Group, Post, User from django.views.generic", "Post.objects.filter( # author__following__user=request.user).select_related( # 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator =", "# user_profile = get_object_or_404( # User.objects.filter(username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True), #", "# ) # return redirect('index') # else: # form = PostForm() # return", "== comment.author: # if request.method == 'POST': # form = CommentForm(request.POST, instance=comment) #", "f'Коментарий отредактирован', ) return super().form_valid(form) def 
get_success_url(self): return reverse('post', kwargs={'username': self.object.author, 'post_id': self.object.post.pk", "AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name = 'comments.html' form_class = CommentForm slug_field =", "Comment template_name = 'new_post.html' form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context = {", "post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following = False", "request.user.is_authenticated: # if Follow.objects.filter(author=user_profile, # user=request.user).exists(): # following = True # return render(request,", "messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={ 'username':", "page, # 'paginator': paginator}) class GroupPostView(ListView): model = Post template_name = 'group.html' paginate_by", "'new_post.html' extra_context = { 'title': 'Создать новою запись' } def form_valid(self, form): post", "messages from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.paginator import Paginator", "# post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following =", "pk=post_id) # if request.method == 'POST': # form = CommentForm(request.POST) # if form.is_valid():", "}) # @login_required # def follow_index(request): # '''Страница с публикациями избранных пользователей''' #", "True } def get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author') @login_required", "author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) 
@login_required def profile_unfollow(request, username):", "post=post_id, pk=comment_id) if request.user == comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required", "comment_count=Count( # 'commented_post')).select_related('author', 'group'), # pk=post_id) # post_comment = Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() #", "Group, Post, User from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView):", "slug_field = 'username' pk_url_kwarg = 'post_id' success_message = 'Запись удалена' def get_success_url(self): return", "= paginator.get_page(page_number) # return render(request, \"group.html\", {'group': group, # 'post_list': page, # 'paginator':", "paginate_by = 5 context_object_name = 'post_list' extra_context = { 'index_page': True } #", "CreateView): model = Post form_class = PostForm template_name = 'new_post.html' extra_context = {", "model = Post template_name = 'profile.html' paginate_by = 5 context_object_name = 'post_list' @property", "= get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post comment.author = self.request.user comment.save()", "post comment.author = self.request.user comment.save() return super().form_valid(form) def get_success_url(self): return reverse('post', kwargs={'username': self.object.author,", "if followed_author == request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username)", "комментарий' # comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user == comment.author: #", "reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def add_comment(request, username, post_id): #", "пользователей''' # follow_page = True # post_list = 
Post.objects.filter( # author__following__user=request.user).select_related( # 'group',", "# comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number = request.GET.get('page')", "'post_list' extra_context = { 'follow_page': True } def get_queryset(self): query_set = super().get_queryset() return", "= 'comments.html' form_class = CommentForm slug_field = 'username' pk_url_kwarg = 'post_id' def form_valid(self,", "CommentForm(request.POST, instance=comment) # if form.is_valid(): # comment = form.save(commit=False) # comment.created = timezone.now()", "на пользователя''' followed_author = get_object_or_404(User, username=username) if followed_author == request.user: return redirect('profile', username=username)", "@property def extra_context(self): profile = get_object_or_404( User.objects.filter( username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True))) following", "def new_post(request): # '''Страница создания новой публикации''' # if request.method == 'POST': #", "post = get_object_or_404( Post.objects.annotate( comment_count=Count( 'commented_post')).select_related('author', 'group'), pk=self.kwargs['post_id']) following = False if self.request.user.is_authenticated:", "@login_required # def post_delete(request, username, post_id): # '''Функция для удаления публикации''' # post", "requests import request from .forms import CommentForm, GroupForm, PostForm from .models import Comment,", "request, \"new_post.html\", {'form': form, 'title': title, 'post': post}) class PostEditUpdateView(LoginRequiredMixin, UpdateView): model =", "username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required def profile_unfollow(request, username): '''Функция для отписки", "True } # def group_posts(request, slug): # '''Страница с публикиями 
связанными с группой'''", "post.delete() # return redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView):", "class GroupAddView(LoginRequiredMixin, CreateView): model = Group template_name = 'new_post.html' form_class = GroupForm extra_context", "добавления комментария к публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.method ==", "user=request.user) follover.delete() return redirect('profile', username=username) @login_required def delete_comment(request, username, post_id, comment_id): '''Функция для", "# comment.created = timezone.now() # comment.save() # return redirect('post', username=username, post_id=post_id) # form", "def profile(request, username): # '''Страница с публикациями пользователя''' # user_profile = get_object_or_404( #", "= timezone.now() # comment.save() # return redirect('post', username=username, post_id=post_id) # form = CommentForm(instance=comment)", "False if self.request.user.is_authenticated: if Follow.objects.filter(author=user_profile, user=self.request.user).exists(): following = True return {'post': post, 'profile':", "template_name = 'new_post.html' extra_context = { 'title': 'Создать новою запись' } def form_valid(self,", "# 'group', 'author').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) #", "username=username) # return redirect('post', post_id=post_id, username=username) class AddCommentView(LoginRequiredMixin, CreateView): model = Comment template_name", "form): post = get_object_or_404(Post, pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post comment.author =", "redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required def profile_unfollow(request, username): '''Функция для", "def 
form_valid(self, form): post = form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request, messages.SUCCESS,", "request from .forms import CommentForm, GroupForm, PostForm from .models import Comment, Follow, Group,", "comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user == comment.author: # if request.method", "get_object_or_404(Group, slug=slug) # post_list = Post.objects.filter( # group=group).select_related( # 'author', 'group').annotate( # comment_count=Count(", "form = PostForm(instance=post) # else: # return redirect('post', post_id=post.pk, username=post.author) # return render(", "username, post_id): # '''Функция для удаления публикации''' # post = get_object_or_404(Post, pk=post_id) #", "form_class = CommentForm pk_url_kwarg = 'comment_id' extra_context = { 'title': 'Редактировать комментарий' }", "form.save(commit=False) # comment.created = timezone.now() # comment.save() # return redirect('post', username=username, post_id=post_id) #", "@login_required # def new_post(request): # '''Страница создания новой публикации''' # if request.method ==", "GroupPostView(ListView): model = Post template_name = 'group.html' paginate_by = 5 context_object_name = 'post_list'", "self.request, messages.SUCCESS, f'Новая запись добавлена' ) return super().form_valid(form) def get_success_url(self): return reverse('index') #", "= Comment.objects.filter( # post=post_id).select_related('author').order_by(\"-created\").all() # form = CommentForm() # following = False #", "\"follow.html\", {'page': page, # 'paginator': paginator, # 'follow_page': follow_page}) class FollowIndexView(LoginRequiredMixin, ListView): model", "True # return render(request, \"profile.html\", {'profile': user_profile, # 'post_list': page, # 'paginator': paginator,", "form.save(commit=False) comment.post = post comment.author = self.request.user comment.save() return super().form_valid(form) def get_success_url(self): return", 
"'follow_page': True } def get_queryset(self): query_set = super().get_queryset() return query_set.filter( author__following__user=self.request.user).select_related( 'group', 'author')", "author__following__user=self.request.user).select_related( 'group', 'author') @login_required def profile_follow(request, username): '''Функция для подписки на пользователя''' followed_author", "return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user) return redirect('profile', username=username) @login_required def profile_unfollow(request, username): '''Функция", "== request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return redirect('profile', username=username) Follow.objects.create(author=followed_author, user=request.user)", "# following = True # return render(request, 'post_view.html', {'post': post, # 'profile': user_profile,", "User.objects.filter( # username=username).annotate( # follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list = Post.objects.filter(", "Follow.objects.filter(author=profile, user=self.request.user).exists(): following = True return { 'profile': profile, 'following': following } def", "# 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) # page_number = request.GET.get('page') # page", "= 'Запись удалена' def get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required", "PostForm(request.POST or None, # files=request.FILES or None, # instance=post) # if form.is_valid(): #", "@property def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args, **kwargs):", ") return super().form_valid(form) def get_success_url(self): return reverse('index') # def post_view(request, post_id, username): #", "= 'comments' 
@property def extra_context(self): user_profile = get_object_or_404( User.objects.filter(username=self.kwargs['username']).annotate( follower_count=Count('follower', distinct=True), following_count=Count('following', distinct=True),", "= True return { 'profile': profile, 'following': following } def get_queryset(self): profile =", "pk=post_id) # if request.user == post.author: # if request.method == \"POST\": # form", "'post_list' @property def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def get_queryset(self, *args,", "Comment, Follow, Group, Post, User from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView)", "post.save() messages.add_message( self.request, messages.SUCCESS, f'Новая запись добавлена' ) return super().form_valid(form) def get_success_url(self): return", "= 'username' pk_url_kwarg = 'post_id' form_class = PostForm template_name = 'new_post.html' extra_context =", "self.request, messages.SUCCESS, f'Запись обновлена!', extra_tags='success' ) return super().form_valid(form) def get_success_url(self): return reverse('profile', kwargs={", "# instance=post) # if form.is_valid(): # post = form.save(commit=False) # post.pub_date = timezone.now()", "get_success_url(self): return reverse('profile', kwargs={ 'username': self.object.author, }) # @login_required # def post_delete(request, username,", "pk=self.kwargs['post_id']) comment = form.save(commit=False) comment.post = post comment.author = self.request.user comment.save() return super().form_valid(form)", "if request.user == comment.author: comment.delete() return redirect('post', username=username, post_id=post_id) # @login_required # def", "к публикации''' # title = 'Редактировать комментарий' # comment = get_object_or_404(Comment, post=post_id, pk=comment_id)", "'title': 'Создать группу' } def get_success_url(self, **kwargs): return reverse('group', kwargs={'slug': self.object.slug}) def page_not_found(request,", 
"post, # 'profile': user_profile, # 'comments': post_comment, # 'form': form, # 'following': following})", "# 'author', 'group').annotate( # comment_count=Count( # 'commented_post')).order_by(\"-pub_date\").all() # paginator = Paginator(post_list, 10) #", "title = 'Редактировать комментарий' # comment = get_object_or_404(Comment, post=post_id, pk=comment_id) # if request.user", "Count from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse from django.utils", "'group', 'author') # @login_required # def post_edit(request, username, post_id): # '''Страница редактирования публикации'''", "} def form_valid(self, form): post = form.save(commit=False) post.pub_date = timezone.now() post.save() messages.add_message( self.request,", "'Редактировать запись' # post = get_object_or_404(Post.objects.select_related('author'), pk=post_id) # if request.user == post.author: #", "# return redirect('profile', username=username) # return redirect('post', post_id=post.pk, username=post.author) class PostDeleteView(LoginRequiredMixin, DeleteView): model", "# follower_count=Count('follower', distinct=True), # following_count=Count('following', distinct=True))) # post_list = Post.objects.filter( # author=user_profile).select_related( #", "'new_post.html' form_class = GroupForm extra_context = { 'title': 'Создать группу' } def get_success_url(self,", "import (ListView, DetailView, CreateView, UpdateView, DeleteView) class IndexListView(ListView): model = Post template_name =", "# if request.method == 'POST': # form = GroupForm(request.POST) # if form.is_valid(): #", "# '''Функция для удаления публикации''' # post = get_object_or_404(Post, pk=post_id) # if request.user", "= form.save(commit=False) # comment.post = post # comment.author = request.user # comment.save() #", "return render(request, 'new_post.html', {'form': form}) # @login_required class NewPostCreateView(LoginRequiredMixin, CreateView): model = Post", "\"group.html\", {'group': group, 
# 'post_list': page, # 'paginator': paginator}) class GroupPostView(ListView): model =", "context_object_name = 'post_list' @property def extra_context(self): return { 'group': get_object_or_404(Group, slug=self.kwargs['slug']) } def", "get_object_or_404(User, username=username) if followed_author == request.user: return redirect('profile', username=username) if Follow.objects.filter(user=request.user, author=followed_author).exists(): return", "else: # return redirect('post', post_id=post.pk, username=post.author) # return render( # request, \"new_post.html\", {'form':", "{ 'title': 'Создать новою запись' } def form_valid(self, form): post = form.save(commit=False) post.author", "distinct=True), # post_count=Count('post_author', distinct=True))) # post = get_object_or_404( # Post.objects.annotate( # comment_count=Count( #" ]
[ "form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try:", "placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self):", "class AddressBookForm(forms.ModelForm): class Meta: model = Address fields = ['id','user','email_id','address','phone_number'] widgets = {", "data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in domain_list: raise ValidationError(\"Please", "'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}),", "['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control", "TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid", "number\") return phone_num 
def clean_email_id(self): data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list =", "self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in", "import forms from django.core.validators import RegexValidator from .models import Address from django.forms import", "\"hotmail.com\",] if domain not in domain_list: raise ValidationError(\"Please enter an Email Address with", "phone_num def clean_email_id(self): data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\",", "domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in domain_list:", "domain_list: raise ValidationError(\"Please enter an Email Address with a valid domain\") return data", "clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please enter a valid", "import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model =", "TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None)", "number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please", "if domain not in domain_list: raise ValidationError(\"Please enter an Email Address with a", "Address fields = ['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': 
TextInput(attrs={'class':'form-control form-control-solid", "'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number',", "RegexValidator from .models import Address from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import", "TypeError): raise ValidationError(\"Please enter a valid phone number\") return phone_num def clean_email_id(self): data", "raise ValidationError(\"Please enter a valid phone number\") return phone_num def clean_email_id(self): data =", "TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), }", "placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError): raise", "widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid", "a valid phone number\") return phone_num def clean_email_id(self): data = self.cleaned_data['email_id'] domain =", "domain not in domain_list: raise ValidationError(\"Please enter an Email Address with a valid", "import RegexValidator from .models import Address from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions", "model = Address fields = 
['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id':", "Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone", "ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model = Address", "import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model = Address fields = ['id','user','email_id','address','phone_number'] widgets", "ValidationError(\"Please enter a valid phone number\") return phone_num def clean_email_id(self): data = self.cleaned_data['email_id']", "TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError,", "'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control", "ValidationError class AddressBookForm(forms.ModelForm): class Meta: model = Address fields = ['id','user','email_id','address','phone_number'] widgets =", "try: int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please enter a valid phone number\") return", "from django.core.validators import RegexValidator from .models import Address from django.forms import ModelForm, 
Textarea,TextInput", "= data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in domain_list: raise", "= Address fields = ['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control", "from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model = Address fields =", "phone number\") return phone_num def clean_email_id(self): data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list", "valid phone number\") return phone_num def clean_email_id(self): data = self.cleaned_data['email_id'] domain = data.split('@')[1]", "form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def", "Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model = Address fields", "None) try: int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please enter a valid phone number\")", "domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in domain_list: raise ValidationError(\"Please enter", "= [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in domain_list: raise ValidationError(\"Please enter an", "django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model = Address fields = ['id','user','email_id','address','phone_number']", "def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please enter a", "def clean_email_id(self): 
data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",]", "(ValueError, TypeError): raise ValidationError(\"Please enter a valid phone number\") return phone_num def clean_email_id(self):", "= self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not", "= ['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address':", "import Address from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm):", "'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except", "form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError):", "django import forms from django.core.validators import RegexValidator from .models import Address from django.forms", "phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please enter a valid phone", "[\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain not in domain_list: raise ValidationError(\"Please enter an Email", "AddressBookForm(forms.ModelForm): class Meta: model = Address fields = ['id','user','email_id','address','phone_number'] widgets = { 'user':", "} def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num) except (ValueError, TypeError): raise 
ValidationError(\"Please enter", ".models import Address from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class", "fields = ['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}),", "django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta: model", "in domain_list: raise ValidationError(\"Please enter an Email Address with a valid domain\") return", "class Meta: model = Address fields = ['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User", "from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class Meta:", "placeholder-no-fix','placeholder':'Address'}), 'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}), } def clean_phone_number(self): phone_num=self.cleaned_data.get('phone_number', None) try: int(phone_num)", "django.core.validators import RegexValidator from .models import Address from django.forms import ModelForm, Textarea,TextInput from", "enter a valid phone number\") return phone_num def clean_email_id(self): data = self.cleaned_data['email_id'] domain", "from django import forms from django.core.validators import RegexValidator from .models import Address from", "from .models import Address from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError", "clean_email_id(self): data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] 
if", "{ 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}), 'phone_number':", "Address from django.forms import ModelForm, Textarea,TextInput from django.core.exceptions import ValidationError class AddressBookForm(forms.ModelForm): class", "= { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}), 'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}), 'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}),", "forms from django.core.validators import RegexValidator from .models import Address from django.forms import ModelForm,", "\"yahoo.com\", \"hotmail.com\",] if domain not in domain_list: raise ValidationError(\"Please enter an Email Address", "Meta: model = Address fields = ['id','user','email_id','address','phone_number'] widgets = { 'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}),", "data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\", \"yahoo.com\", \"hotmail.com\",] if domain", "not in domain_list: raise ValidationError(\"Please enter an Email Address with a valid domain\")", "<gh_stars>0 from django import forms from django.core.validators import RegexValidator from .models import Address", "return phone_num def clean_email_id(self): data = self.cleaned_data['email_id'] domain = data.split('@')[1] domain_list = [\"gmail.com\",", "except (ValueError, TypeError): raise ValidationError(\"Please enter a valid phone number\") return phone_num def", "int(phone_num) except (ValueError, TypeError): raise ValidationError(\"Please enter a valid phone number\") 
return phone_num" ]
[]
[]
[ "subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB', type=int,", "authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity: config = self._config redis = await self._connect_redis()", "login = Login(config, connect_redis) return cls(login, config, status), config @classmethod async def _connect_redis(cls,", "address(self) -> str: \"\"\"The redis server address. Defaults to a connection to localhost.", "def __init__(self, config: Config, tokens: TokensInterface, redis: Redis, name: str, role: str =", "config = self._config redis = await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] =", "import create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat from", "GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import Message, MailboxSet from ..session import BaseSession", "self._name = name self._role = role @property def name(self) -> str: return self._name", "from pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config import BackendCapability, IMAPConfig", "address: str, select: Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes, users_json: bool, **extra:", "JSON object with a ``\"password\"`` attribute, instead of a redis hash with a", "users_prefix(self) -> bytes: \"\"\"The prefix for user lookup keys.\"\"\" return self._users_prefix @property def", "self._connect_redis = connect_redis self._tokens = AllTokens() @property def tokens(self) -> TokensInterface: return self._tokens", "-> None: config = self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis, config, self._status)", "status), config @classmethod async def _connect_redis(cls, config: Config, status: HealthStatus) -> Redis: try:", 
"``\"password\"`` attribute, instead of a redis hash with a ``password`` key. See Also:", "self._prefix = prefix self._users_prefix = users_prefix self._users_json = users_json @property def backend_capability(self) ->", "redis: Redis, global_keys: GlobalKeys, user: str) -> bytes: user_key = user.encode('utf-8') new_namespace =", "self.config redis = self.redis if self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot assign", "redis server address. Defaults to a connection to localhost. See Also: :func:`aioredis.create_connection` \"\"\"", "= role @property def name(self) -> str: return self._name @property def redis(self) ->", "implementation for the redis backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) ->", "!= credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The", "credentials: AuthenticationCredentials) \\ -> Identity: config = self._config redis = await self._connect_redis() authcid", "AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity implementation for", "\\ NamespaceKeys from .mailbox import Message, MailboxSet from ..session import BaseSession __all__ =", "self._mailbox_set @property def filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login implementation", "self._users_prefix = users_prefix self._users_json = users_json @property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True,", "contains JSON. 
\"\"\" def __init__(self, args: Namespace, *, address: str, select: Optional[int], separator:", "= self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis,", "def prefix(self) -> bytes: \"\"\"The prefix for mail data keys. This prefix does", "def select(self) -> Optional[int]: \"\"\"The redis database for mail data. If given, the", "@classmethod def parse_args(cls, args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select':", "user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec()", "in metadata.params: return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def", "successful user lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return self._select @property def separator(self) ->", "return self._prefix @property def users_prefix(self) -> bytes: \"\"\"The prefix for user lookup keys.\"\"\"", "prefix self._users_prefix = users_prefix self._users_json = users_json @property def backend_capability(self) -> BackendCapability: return", "init(cls, args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]: config = Config.from_args(args) status", "= config self._connect_redis = connect_redis self._tokens = AllTokens() @property def tokens(self) -> TokensInterface:", "import token_bytes from typing import Any, Optional, Final from aioredis import create_redis, Redis,", "def get(self) -> UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes)", "add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser: parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address',", "role: Optional[str] = None if credentials.authcid_type == 
'admin-token': authcid = credentials.identity role =", "help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the", "return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address':", "GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address,", "the redis backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface, redis: Redis, name: str,", "from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import Message,", "def _connect_redis(cls, config: Config, status: HealthStatus) -> Redis: try: redis = await create_redis(config.address)", "self._config = config self._mailbox_set = mailbox_set self._filter_set = filter_set @property def config(self) ->", "await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] = None role: Optional[str] = None", "Namespace from collections.abc import Awaitable, Callable, Mapping, AsyncIterator from contextlib import closing, asynccontextmanager,", "status: HealthStatus) -> None: super().__init__() self._login = login self._config = config self._status =", "except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis", "(ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async", "-> bytes: \"\"\"The bytestring used to separate segments of composite redis keys.\"\"\" return", "metadata.params: token_key = bytes.fromhex(metadata.params['key']) role = role or metadata.role await metadata.check_password(credentials, 
token_key=token_key) if", "connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import HealthStatus", "UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict is", "Any, Optional, Final from aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials", "redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self) -> None: config =", "@property def status(self) -> HealthStatus: return self._status @classmethod def add_subparser(cls, name: str, subparsers:", "redis self._config = config self._mailbox_set = mailbox_set self._filter_set = filter_set @property def config(self)", "Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix':", "= redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self) -> None: config", "separate segments of composite redis keys.\"\"\" return self._separator @property def prefix(self) -> bytes:", "subparsers: Any) -> ArgumentParser: parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the", "@property def name(self) -> str: return self._name @property def redis(self) -> Redis: redis", "IncompatibleData() return namespace async def get(self) -> UserMetadata: redis = self.redis user_bytes =", "to :attr:`.users_key`. 
\"\"\" return self._prefix @property def users_prefix(self) -> bytes: \"\"\"The prefix for", "tokens self._redis: Optional[Redis] = redis self._name = name self._role = role @property def", "UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata) -> None: config =", "from pymap.token import AllTokens from pymap.user import UserMetadata from .cleanup import CleanupTask from", "str = None) -> None: super().__init__() self.config: Final = config self.tokens: Final =", "= ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis data", "'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class", "metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else: multi = redis.multi_exec()", "@property def prefix(self) -> bytes: \"\"\"The prefix for mail data keys. 
This prefix", "-> bool: \"\"\"True if the value from the user lookup key contains a", "-> None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8')) if not await self.redis.delete(user_key): raise", "from ..session import BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a", "__all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis", "if self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8'))", "from typing import Any, Optional, Final from aioredis import create_redis, Redis, ConnectionClosedError from", "redis is None: # Other methods may not be called after new_session(), since", "@classmethod async def _connect_redis(cls, config: Config, status: HealthStatus) -> Redis: try: redis =", "= config._global_keys connect_redis = partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class", "TokensInterface, redis: Redis, name: str, role: str = None) -> None: super().__init__() self.config:", "user lookup value contains JSON') return parser @classmethod async def init(cls, args: Namespace,", "key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) ->", "def init(cls, args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]: config = Config.from_args(args)", "does not apply to :attr:`.users_key`. \"\"\" return self._prefix @property def users_prefix(self) -> bytes:", "import json import uuid from argparse import ArgumentParser, Namespace from collections.abc import Awaitable,", "GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace)", "SELECT on the connection. 
raise RuntimeError() return redis async def new_token(self, *, expiration:", "if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key)", "return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) -> str: \"\"\"The redis server address.", "bytestring used to separate segments of composite redis keys.\"\"\" return self._separator @property def", "None if credentials.authcid_type == 'admin-token': authcid = credentials.identity role = 'admin' try: authcid_identity", "after new_session(), since it # may have called SELECT on the connection. raise", "user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key) if json_data is None:", "config = self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis, config, self._status) cleanup_task =", "self._config @property def status(self) -> HealthStatus: return self._status @classmethod def add_subparser(cls, name: str,", "await multi.execute() async def delete(self) -> None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8'))", "= redis self._name = name self._role = role @property def name(self) -> str:", "The prefix for mail data keys. users_prefix: The user lookup key prefix. 
users_json:", "self.redis if self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot assign role.') user_key =", "= FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name, config,", "= partial(cls._connect_redis, config, status) login = Login(config, connect_redis) return cls(login, config, status), config", "user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value contains JSON') return", "import Awaitable, Callable, Mapping, AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack from datetime", "for mail data. separator: The redis key segment separator. prefix: The prefix for", "from functools import partial from secrets import token_bytes from typing import Any, Optional,", "-> bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION,", "a ``\"password\"`` attribute, instead of a redis hash with a ``password`` key. 
See", "-> IMAPConfig: return self._config @property def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def", "from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface", "self._status @classmethod def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser: parser = subparsers.add_parser(name,", "backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config =", "role or metadata.role await metadata.check_password(credentials, token_key=token_key) if role != 'admin' and authcid !=", "None: await redis.select(config.select) global_keys = config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys", "= role or metadata.role await metadata.check_password(credentials, token_key=token_key) if role != 'admin' and authcid", "RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import Message, MailboxSet from ..session import", "def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser: parser = subparsers.add_parser(name, help='redis backend')", "RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {})", "import AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config import BackendCapability, IMAPConfig from pymap.context", "OSError): status.set_unhealthy() raise else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async def", "-> ArgumentParser: parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server", "= prefix self._users_prefix = users_prefix self._users_json = users_json @property def backend_capability(self) -> 
BackendCapability:", "set(self, metadata: UserMetadata) -> None: config = self.config redis = self.redis if self._role", "import datetime from functools import partial from secrets import token_bytes from typing import", "IMAPConfig: return self._config @property def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def filter_set(self)", "namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys)", "bytes.fromhex(metadata.params['key']) role = role or metadata.role await metadata.check_password(credentials, token_key=token_key) if role != 'admin'", "return self._users_prefix @property def users_json(self) -> bool: \"\"\"True if the value from the", "CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX')", "create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return", "\"\"\"The bytestring used to separate segments of composite redis keys.\"\"\" return self._separator @property", "Callable, Mapping, AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack from datetime import datetime", "self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config = self.config redis =", "uses redis data structures for mailbox storage. 
\"\"\" def __init__(self, login: Login, config:", "Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config import", "help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup key prefix')", "class Identity(IdentityInterface): \"\"\"The identity implementation for the redis backend.\"\"\" def __init__(self, config: Config,", "NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis,", "_get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str) -> bytes: user_key = user.encode('utf-8') new_namespace", "== 'admin-token': authcid = credentials.identity role = 'admin' try: authcid_identity = Identity(config, self.tokens,", "= await redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict)", "lookup key prefix. users_json: True if the user lookup value contains JSON. \"\"\"", "@property def address(self) -> str: \"\"\"The redis server address. 
Defaults to a connection", "args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for the redis backend.\"\"\" resource = __name__", "role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await", "IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface from", "self._filter_set class Login(LoginInterface): \"\"\"The login implementation for the redis backend.\"\"\" def __init__(self, config:", "= bytes.fromhex(metadata.params['key']) role = role or metadata.role await metadata.check_password(credentials, token_key=token_key) if role !=", "b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _,", "_users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys: key", "default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database for mail", "return self._mailbox_set @property def filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login", "token_bytes from typing import Any, Optional, Final from aioredis import create_redis, Redis, ConnectionClosedError", "config: Config, status: HealthStatus) -> Redis: try: redis = await create_redis(config.address) except (ConnectionClosedError,", "data. If given, the `SELECT`_ command is called after successful user lookup. 
..", "redis backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config", "lookup value contains JSON') return parser @classmethod async def init(cls, args: Namespace, **overrides:", "def __init__(self, args: Namespace, *, address: str, select: Optional[int], separator: bytes, prefix: bytes,", "@property def config(self) -> Config: return self._config @property def status(self) -> HealthStatus: return", "expiration: datetime = None) -> Optional[str]: metadata = await self.get() if 'key' not", "address. select: The redis database for mail data. separator: The redis key segment", "self._config @property def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def filter_set(self) -> FilterSet:", "None: # Other methods may not be called after new_session(), since it #", "\"\"\" return self._address @property def select(self) -> Optional[int]: \"\"\"The redis database for mail", "= connect_redis self._tokens = AllTokens() @property def tokens(self) -> TokensInterface: return self._tokens async", "Identity(IdentityInterface): \"\"\"The identity implementation for the redis backend.\"\"\" def __init__(self, config: Config, tokens:", "1) if int(version) != DATA_VERSION: raise IncompatibleData() return namespace async def get(self) ->", "import HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token", "self._prefix @property def users_prefix(self) -> bytes: \"\"\"The prefix for user lookup keys.\"\"\" return", "redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def", "connect_redis = partial(cls._connect_redis, config, status) login = Login(config, connect_redis) return cls(login, config, status),", "JSON. 
\"\"\" def __init__(self, args: Namespace, *, address: str, select: Optional[int], separator: bytes,", "def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys:", "import annotations import json import uuid from argparse import ArgumentParser, Namespace from collections.abc", "metavar='DB', type=int, help='the redis database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis", "super().__init__() self._login = login self._config = config self._status = status @property def login(self)", "admin_key=token_bytes(), **extra) self._address = address self._select = select self._separator = separator self._prefix =", "ArgumentParser, Namespace from collections.abc import Awaitable, Callable, Mapping, AsyncIterator from contextlib import closing,", "UserNotFound: metadata = UserMetadata(config) if 'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role =", "**overrides: Any) \\ -> tuple[RedisBackend, Config]: config = Config.from_args(args) status = HealthStatus() connect_redis", "mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail',", "int(version) != DATA_VERSION: raise IncompatibleData() return namespace async def get(self) -> UserMetadata: redis", "= self.redis if self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot assign role.') user_key", "type=int, help='the redis database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key", "config = self.config redis = self.redis self._redis = None if config.select is not", "'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis data structures for", "JSON') return parser @classmethod async def init(cls, args: Namespace, **overrides: Any) \\ ->", 
"config implementation for the redis backend. Args: args: The command-line arguments. address: The", "await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis))", "The redis server address. select: The redis database for mail data. separator: The", "on the connection. raise RuntimeError() return redis async def new_token(self, *, expiration: datetime", "redis = self.redis if self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot assign role.')", "HealthStatus: return self._status @classmethod def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser: parser", "ns_val = await multi.execute() version, namespace = ns_val.split(b'/', 1) if int(version) != DATA_VERSION:", "= RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) -> Mapping[str,", "address: The redis server address. select: The redis database for mail data. separator:", "role = 'admin' try: authcid_identity = Identity(config, self.tokens, redis, authcid) metadata = await", "key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value contains JSON') return parser @classmethod", "@property def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod", "None: super().__init__() self._config = config self._connect_redis = connect_redis self._tokens = AllTokens() @property def", "config self._connect_redis = connect_redis self._tokens = AllTokens() @property def tokens(self) -> TokensInterface: return", "from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import HealthStatus from", "a backend that uses redis data structures for mailbox storage. 
\"\"\" def __init__(self,", "self._status = status @property def login(self) -> Login: return self._login @property def config(self)", "Config, status: HealthStatus) -> Redis: try: redis = await create_redis(config.address) except (ConnectionClosedError, OSError):", "= config self._status = status @property def login(self) -> Login: return self._login @property", "value contains JSON. \"\"\" def __init__(self, args: Namespace, *, address: str, select: Optional[int],", "Final from aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes", ":func:`aioredis.create_connection` \"\"\" return self._address @property def select(self) -> Optional[int]: \"\"\"The redis database for", "redis self._name = name self._role = role @property def name(self) -> str: return", "bytes: \"\"\"The bytestring used to separate segments of composite redis keys.\"\"\" return self._separator", "CleanupTask from .filter import FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\", "bytes, users_prefix: bytes, users_json: bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address", "self._login = login self._config = config self._status = status @property def login(self) ->", "def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def filter_set(self) -> FilterSet: return self._filter_set", "'admin' and authcid != credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role)", "def config(self) -> IMAPConfig: return self._config @property def mailbox_set(self) -> MailboxSet: return self._mailbox_set", "redis, authcid) metadata = await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if 'key'", "pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config import BackendCapability, IMAPConfig from", "= name self._role 
= role @property def name(self) -> str: return self._name @property", "if 'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role = role or metadata.role await", "await metadata.check_password(credentials, token_key=token_key) if role != 'admin' and authcid != credentials.identity: raise AuthorizationFailure()", "metadata.check_password(credentials, token_key=token_key) if role != 'admin' and authcid != credentials.identity: raise AuthorizationFailure() return", "cls(login, config, status), config @classmethod async def _connect_redis(cls, config: Config, status: HealthStatus) ->", "is not None: await redis.select(config.select) global_keys = config._global_keys namespace = await self._get_namespace(redis, global_keys,", "\"\"\"The login implementation for the redis backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[],", "MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis = redis self._config = config self._mailbox_set", "contains a JSON object with a ``\"password\"`` attribute, instead of a redis hash", "Login(LoginInterface): \"\"\"The login implementation for the redis backend.\"\"\" def __init__(self, config: Config, connect_redis:", "if data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self, metadata:", "NotAllowedError, UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import", "user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else: multi", "to a connection to localhost. See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def", "lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value contains JSON') return parser", "the connection. 
raise RuntimeError() return redis async def new_token(self, *, expiration: datetime =", "['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis data structures", "the redis backend.\"\"\" resource = __name__ def __init__(self, redis: Redis, owner: str, config:", "mail data keys. users_prefix: The user lookup key prefix. users_json: True if the", "user lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return self._select @property def separator(self) -> bytes:", "lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return self._select @property def separator(self) -> bytes: \"\"\"The", "UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface,", "Optional, Final from aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from", "self._name @property def redis(self) -> Redis: redis = self._redis if redis is None:", "super().__init__(owner) self._redis = redis self._config = config self._mailbox_set = mailbox_set self._filter_set = filter_set", "parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database", "from pymap.config import BackendCapability, IMAPConfig from pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure,", "-> bytes: \"\"\"The prefix for user lookup keys.\"\"\" return self._users_prefix @property def users_json(self)", "server address. select: The redis database for mail data. 
separator: The redis key", "None: super().__init__() self._login = login self._config = config self._status = status @property def", "self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key) if json_data is None: raise UserNotFound(self.name)", "import ArgumentParser, Namespace from collections.abc import Awaitable, Callable, Mapping, AsyncIterator from contextlib import", "\"\"\"The redis server address. Defaults to a connection to localhost. See Also: :func:`aioredis.create_connection`", "self._config redis = await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] = None role:", "-> None: super().__init__() self._config = config self._connect_redis = connect_redis self._tokens = AllTokens() @property", "datetime = None) -> Optional[str]: metadata = await self.get() if 'key' not in", "bytes, prefix: bytes, users_prefix: bytes, users_json: bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(),", "aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat", "server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database for mail data') parser.add_argument('--separator', metavar='CHAR',", "return {**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json':", "None: super().__init__(owner) self._redis = redis self._config = config self._mailbox_set = mailbox_set self._filter_set =", "config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys =", "metadata: UserMetadata) -> None: config = self.config redis = self.redis if self._role !=", "name: str, subparsers: Any) -> ArgumentParser: parser = 
subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL',", "Optional[Redis] = redis self._name = name self._role = role @property def name(self) ->", "async def _connect_redis(cls, config: Config, status: HealthStatus) -> Redis: try: redis = await", "pymap.config import BackendCapability, IMAPConfig from pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData,", "@property def login(self) -> Login: return self._login @property def config(self) -> Config: return", "else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise UserNotFound(self.name) return", "mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis = redis self._config = config", "str, role: str = None) -> None: super().__init__() self.config: Final = config self.tokens:", "= await self.get() if 'key' not in metadata.params: return None key = bytes.fromhex(metadata.params['key'])", "class Config(IMAPConfig): \"\"\"The config implementation for the redis backend. Args: args: The command-line", "\"\"\"Defines a backend that uses redis data structures for mailbox storage. \"\"\" def", "mail data. separator: The redis key segment separator. prefix: The prefix for mail", "If given, the `SELECT`_ command is called after successful user lookup. .. _SELECT:", "async def new_token(self, *, expiration: datetime = None) -> Optional[str]: metadata = await", "config: Config, status: HealthStatus) -> None: super().__init__() self._login = login self._config = config", "address self._select = select self._separator = separator self._prefix = prefix self._users_prefix = users_prefix", "config @classmethod async def _connect_redis(cls, config: Config, status: HealthStatus) -> Redis: try: redis", "the redis backend. Args: args: The command-line arguments. 
address: The redis server address.", "Redis: try: redis = await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy()", "= select self._separator = separator self._prefix = prefix self._users_prefix = users_prefix self._users_json =", "mail data. If given, the `SELECT`_ command is called after successful user lookup.", "mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name, config, mailbox_set, filter_set) async def _get_namespace(self,", "for mail data keys. users_prefix: The user lookup key prefix. users_json: True if", "datetime from functools import partial from secrets import token_bytes from typing import Any,", "command-line arguments. address: The redis server address. select: The redis database for mail", "status(self) -> HealthStatus: return self._status @classmethod def add_subparser(cls, name: str, subparsers: Any) ->", "-> Redis: try: redis = await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else:", "redis server address. select: The redis database for mail data. 
separator: The redis", "self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key,", "import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend import", "stack.enter_context(closing(redis)) return redis async def start(self, stack: AsyncExitStack) -> None: config = self._config", "connect_redis self._tokens = AllTokens() @property def tokens(self) -> TokensInterface: return self._tokens async def", "class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis data structures for mailbox storage.", "metadata.role await metadata.check_password(credentials, token_key=token_key) if role != 'admin' and authcid != credentials.identity: raise", "Namespace, *, address: str, select: Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes, users_json:", "@property def config(self) -> IMAPConfig: return self._config @property def mailbox_set(self) -> MailboxSet: return", "Args: args: The command-line arguments. address: The redis server address. 
select: The redis", "args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for", "user lookup key contains a JSON object with a ``\"password\"`` attribute, instead of", "raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict", "from collections.abc import Awaitable, Callable, Mapping, AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack", "AuthenticationCredentials) \\ -> Identity: config = self._config redis = await self._connect_redis() authcid =", "MailboxSet from ..session import BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines", "that uses redis data structures for mailbox storage. \"\"\" def __init__(self, login: Login,", "Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis = redis self._config =", "def separator(self) -> bytes: \"\"\"The bytestring used to separate segments of composite redis", "redis database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment separator')", "metadata = UserMetadata(config) if 'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role = role", "partial from secrets import token_bytes from typing import Any, Optional, Final from aioredis", "args: The command-line arguments. address: The redis server address. 
select: The redis database", "def new_token(self, *, expiration: datetime = None) -> Optional[str]: metadata = await self.get()", "user_key) _, ns_val = await multi.execute() version, namespace = ns_val.split(b'/', 1) if int(version)", "from pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from", "% (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val", "redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute() version, namespace", "multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self) -> None: config = self.config", "database for mail data. If given, the `SELECT`_ command is called after successful", "return self._status @classmethod def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser: parser =", "import Message, MailboxSet from ..session import BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class", "@property def redis(self) -> Redis: redis = self._redis if redis is None: #", "@property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) ->", "self._tokens = AllTokens() @property def tokens(self) -> TokensInterface: return self._tokens async def authenticate(self,", "AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config import BackendCapability, IMAPConfig from pymap.context import", "self.config: Final = config self.tokens: Final = tokens self._redis: Optional[Redis] = redis self._name", "def set(self, metadata: UserMetadata) -> None: config = self.config redis = self.redis if", "self._address @property def select(self) -> Optional[int]: \"\"\"The redis database for 
mail data. If", "Login: return self._login @property def config(self) -> Config: return self._config @property def status(self)", "status: HealthStatus) -> Redis: try: redis = await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy()", "HealthStatus) -> Redis: try: redis = await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise", "redis = await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack =", "instead of a redis hash with a ``password`` key. See Also: `redis hashes", "-> TokensInterface: return self._tokens async def authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity: config", "return parser @classmethod async def init(cls, args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend,", "import uuid from argparse import ArgumentParser, Namespace from collections.abc import Awaitable, Callable, Mapping,", "self.tokens, redis, authcid) metadata = await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if", "self._filter_set = filter_set @property def config(self) -> IMAPConfig: return self._config @property def mailbox_set(self)", "{}) return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args),", "separator: The redis key segment separator. prefix: The prefix for mail data keys.", "def name(self) -> str: return self._name @property def redis(self) -> Redis: redis =", "return cls(login, config, status), config @classmethod async def _connect_redis(cls, config: Config, status: HealthStatus)", "hash with a ``password`` key. 
See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json", "from pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface", "config self._status = status @property def login(self) -> Login: return self._login @property def", "'admin' try: authcid_identity = Identity(config, self.tokens, redis, authcid) metadata = await authcid_identity.get() except", "self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key) if json_data is", "import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import Message, MailboxSet from", "return self._separator @property def prefix(self) -> bytes: \"\"\"The prefix for mail data keys.", "collections.abc import Awaitable, Callable, Mapping, AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack from", "= tokens self._redis: Optional[Redis] = redis self._name = name self._role = role @property", "database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment separator') parser.add_argument('--prefix',", "redis async def new_token(self, *, expiration: datetime = None) -> Optional[str]: metadata =", "return namespace async def get(self) -> UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8')", "config, status), config @classmethod async def _connect_redis(cls, config: Config, status: HealthStatus) -> Redis:", "namespace async def get(self) -> UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8') user_key", "@property def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) ->", "args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select': 
args.select, 'separator': args.separator.encode('utf-8'),", "prefix for mail data keys. This prefix does not apply to :attr:`.users_key`. \"\"\"", "keys. This prefix does not apply to :attr:`.users_key`. \"\"\" return self._prefix @property def", "async def set(self, metadata: UserMetadata) -> None: config = self.config redis = self.redis", "cl_keys) filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis,", "self.get() if 'key' not in metadata.params: return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name,", "BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property", "help='the redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database for mail data')", "return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) ->", "parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup", "metadata.params: return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self)", "users_prefix: bytes, users_json: bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address =", "pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend", "__init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config = config self._connect_redis", "config._global_keys connect_redis = partial(self._connect_redis, config, self._status) 
cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig):", "# may have called SELECT on the connection. raise RuntimeError() return redis async", "global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys,", "await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name, config, mailbox_set, filter_set) async def", "separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the", "-> Redis: redis = self._redis if redis is None: # Other methods may", "Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix':", "<https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def", "= filter_set @property def config(self) -> IMAPConfig: return self._config @property def mailbox_set(self) ->", "= credentials.identity role = 'admin' try: authcid_identity = Identity(config, self.tokens, redis, authcid) metadata", "self.tokens: Final = tokens self._redis: Optional[Redis] = redis self._name = name self._role =", "role) class Identity(IdentityInterface): \"\"\"The identity implementation for the redis backend.\"\"\" def __init__(self, config:", "def __init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config = config", "annotations import json import uuid from argparse import ArgumentParser, Namespace from collections.abc import", "= credentials.authcid token_key: Optional[bytes] = None role: Optional[str] = None if credentials.authcid_type ==", 
"users_json(self) -> bool: \"\"\"True if the value from the user lookup key contains", "raise else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self, stack:", "mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except", "import BytesFormat from pymap.config import BackendCapability, IMAPConfig from pymap.context import connection_exit from pymap.exceptions", "def address(self) -> str: \"\"\"The redis server address. Defaults to a connection to", "key. See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self) ->", "class Login(LoginInterface): \"\"\"The login implementation for the redis backend.\"\"\" def __init__(self, config: Config,", "filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name,", "BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self)", "The user lookup key prefix. 
users_json: True if the user lookup value contains", "import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token import AllTokens from pymap.user", "ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield", "self.config redis = self.redis self._redis = None if config.select is not None: await", "parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select',", "None) -> None: super().__init__() self.config: Final = config self.tokens: Final = tokens self._redis:", "tuple[RedisBackend, Config]: config = Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis, config, status)", "config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis = redis self._config", "Awaitable, Callable, Mapping, AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack from datetime import", "or metadata.role await metadata.check_password(credentials, token_key=token_key) if role != 'admin' and authcid != credentials.identity:", "import Any, Optional, Final from aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds import", "return self._address @property def select(self) -> Optional[int]: \"\"\"The redis database for mail data.", "redis.get(user_key) if json_data is None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict =", "return Identity(config, self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity implementation for the", "\"\"\"The prefix for mail data keys. 
This prefix does not apply to :attr:`.users_key`.", "pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health", "= NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set =", "-> AsyncIterator[Session]: config = self.config redis = self.redis self._redis = None if config.select", ".cleanup import CleanupTask from .filter import FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys,", "-> tuple[RedisBackend, Config]: config = Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis, config,", "argparse import ArgumentParser, Namespace from collections.abc import Awaitable, Callable, Mapping, AsyncIterator from contextlib", "= self.config redis = self.redis self._redis = None if config.select is not None:", "**extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select = select", "except UserNotFound: metadata = UserMetadata(config) if 'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role", "prefix does not apply to :attr:`.users_key`. \"\"\" return self._prefix @property def users_prefix(self) ->", "arguments. address: The redis server address. select: The redis database for mail data.", "Identity: config = self._config redis = await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes]", "multi_append=True) @property def address(self) -> str: \"\"\"The redis server address. 
Defaults to a", "return self._config @property def status(self) -> HealthStatus: return self._status @classmethod def add_subparser(cls, name:", "composite redis keys.\"\"\" return self._separator @property def prefix(self) -> bytes: \"\"\"The prefix for", "ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute() version, namespace = ns_val.split(b'/', 1)", "= await multi.execute() version, namespace = ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise", "await multi.execute() version, namespace = ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise IncompatibleData()", "= self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key) if json_data", "AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack from datetime import datetime from functools", "config = Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login =", "\"\"\"The config implementation for the redis backend. Args: args: The command-line arguments. 
address:", "login self._config = config self._status = status @property def login(self) -> Login: return", "Any]: return {**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'),", "mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def filter_set(self) -> FilterSet: return self._filter_set class", "Redis: redis = self._redis if redis is None: # Other methods may not", "return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config = self.config redis", "multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute()", ":attr:`.users_key`. \"\"\" return self._prefix @property def users_prefix(self) -> bytes: \"\"\"The prefix for user", "from pymap.user import UserMetadata from .cleanup import CleanupTask from .filter import FilterSet from", "a ``password`` key. See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def", "Any) \\ -> tuple[RedisBackend, Config]: config = Config.from_args(args) status = HealthStatus() connect_redis =", "data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true',", "address. Defaults to a connection to localhost. See Also: :func:`aioredis.create_connection` \"\"\" return self._address", ".. 
_SELECT: https://redis.io/commands/select \"\"\" return self._select @property def separator(self) -> bytes: \"\"\"The bytestring", "prefix for user lookup keys.\"\"\" return self._users_prefix @property def users_json(self) -> bool: \"\"\"True", "Redis, name: str, role: str = None) -> None: super().__init__() self.config: Final =", "bytes: \"\"\"The prefix for user lookup keys.\"\"\" return self._users_prefix @property def users_json(self) ->", "parser.add_argument('--users-json', action='store_true', help='the user lookup value contains JSON') return parser @classmethod async def", "!= DATA_VERSION: raise IncompatibleData() return namespace async def get(self) -> UserMetadata: redis =", "def users_json(self) -> bool: \"\"\"True if the value from the user lookup key", "metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database for", "key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config = self.config redis = self.redis", "connection to localhost. See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def select(self) ->", "namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys)", "_connect_redis(cls, config: Config, status: HealthStatus) -> Redis: try: redis = await create_redis(config.address) except", "return redis async def new_token(self, *, expiration: datetime = None) -> Optional[str]: metadata", "structures for mailbox storage. \"\"\" def __init__(self, login: Login, config: Config, status: HealthStatus)", "str) -> bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' %", "bytes: \"\"\"The prefix for mail data keys. 
This prefix does not apply to", "multi.execute() async def delete(self) -> None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8')) if", "None: super().__init__() self.config: Final = config self.tokens: Final = tokens self._redis: Optional[Redis] =", "users_json: True if the user lookup value contains JSON. \"\"\" def __init__(self, args:", "config, mailbox_set, filter_set) async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str) ->", "return self._users_json @property def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) ->", "parser @classmethod async def init(cls, args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]:", "user lookup value contains JSON. \"\"\" def __init__(self, args: Namespace, *, address: str,", "is None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8')", "connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self, stack: AsyncExitStack) -> None: config =", "users_prefix: The user lookup key prefix. users_json: True if the user lookup value", "prefix(self) -> bytes: \"\"\"The prefix for mail data keys. 
This prefix does not", "super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select = select self._separator = separator self._prefix", "\"\"\" def __init__(self, login: Login, config: Config, status: HealthStatus) -> None: super().__init__() self._login", "Config, status: HealthStatus) -> None: super().__init__() self._login = login self._config = config self._status", "redis.select(config.select) global_keys = config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys,", "*, expiration: datetime = None) -> Optional[str]: metadata = await self.get() if 'key'", "lookup keys.\"\"\" return self._users_prefix @property def users_json(self) -> bool: \"\"\"True if the value", "redis database for mail data. separator: The redis key segment separator. prefix: The", "to separate segments of composite redis keys.\"\"\" return self._separator @property def prefix(self) ->", "contextlib import closing, asynccontextmanager, AsyncExitStack from datetime import datetime from functools import partial", "implementation for the redis backend. Args: args: The command-line arguments. address: The redis", "for mail data keys. This prefix does not apply to :attr:`.users_key`. 
\"\"\" return", "else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self) ->", "redis backend.\"\"\" resource = __name__ def __init__(self, redis: Redis, owner: str, config: Config,", "backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) -> str: \"\"\"The", "import TokensInterface from pymap.token import AllTokens from pymap.user import UserMetadata from .cleanup import", "args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for the redis", "separator. prefix: The prefix for mail data keys. users_prefix: The user lookup key", "have called SELECT on the connection. raise RuntimeError() return redis async def new_token(self,", "address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/',", "_global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls,", "Session(redis, self.name, config, mailbox_set, filter_set) async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user:", "self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key)", "def new_session(self) -> AsyncIterator[Session]: config = self.config redis = self.redis self._redis = None", "None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata) -> None:", "separator(self) -> bytes: \"\"\"The bytestring used to separate segments of composite redis keys.\"\"\"", "self._address = address self._select = select self._separator = separator 
self._prefix = prefix self._users_prefix", "json import uuid from argparse import ArgumentParser, Namespace from collections.abc import Awaitable, Callable,", "@asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config = self.config redis = self.redis self._redis", "Redis, owner: str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis", "HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login = Login(config, connect_redis) return cls(login, config,", "from aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes import", "object with a ``\"password\"`` attribute, instead of a redis hash with a ``password``", "global_keys = config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace)", "'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role = role or metadata.role await metadata.check_password(credentials,", "role = role or metadata.role await metadata.check_password(credentials, token_key=token_key) if role != 'admin' and", "uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val)", "str: \"\"\"The redis server address. Defaults to a connection to localhost. 
See Also:", "-> MailboxSet: return self._mailbox_set @property def filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface):", "UserMetadata from .cleanup import CleanupTask from .filter import FilterSet from .keys import DATA_VERSION,", "-> None: config = self.config redis = self.redis if self._role != 'admin' and", "json.dumps(user_dict) await redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute()", "self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start()", "json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self)", "config, status) login = Login(config, connect_redis) return cls(login, config, status), config @classmethod async", "Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def select(self) -> Optional[int]: \"\"\"The redis database", "multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self) -> None: config = self.config user_key", "action='store_true', help='the user lookup value contains JSON') return parser @classmethod async def init(cls,", "config self._mailbox_set = mailbox_set self._filter_set = filter_set @property def config(self) -> IMAPConfig: return", "pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from", "in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role = role or metadata.role await metadata.check_password(credentials, token_key=token_key)", "None: config = self.config redis = self.redis if self._role != 'admin' and metadata.role:", "ValueError: pass yield Session(redis, self.name, config, mailbox_set, filter_set) async def 
_get_namespace(self, redis: Redis,", "\"\"\"The redis database for mail data. If given, the `SELECT`_ command is called", "called after new_session(), since it # may have called SELECT on the connection.", "help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key prefix')", "{}) @property def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key)", "-> Optional[str]: metadata = await self.get() if 'key' not in metadata.params: return None", "= config self.tokens: Final = tokens self._redis: Optional[Redis] = redis self._name = name", "prefix. users_json: True if the user lookup value contains JSON. \"\"\" def __init__(self,", "new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces,", "..session import BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend", "if redis is None: # Other methods may not be called after new_session(),", "None if config.select is not None: await redis.select(config.select) global_keys = config._global_keys namespace =", "str, subparsers: Any) -> ArgumentParser: parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost',", "credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity", "= bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config =", "Login, config: Config, status: HealthStatus) -> None: super().__init__() self._login = login self._config =", "= UserMetadata(config) if 'key' in metadata.params: token_key = 
bytes.fromhex(metadata.params['key']) role = role or", "config: Config, tokens: TokensInterface, redis: Redis, name: str, role: str = None) ->", "= user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi =", "await redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async", "-> None: super().__init__() self._login = login self._config = config self._status = status @property", "_, ns_val = await multi.execute() version, namespace = ns_val.split(b'/', 1) if int(version) !=", "``password`` key. See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self)", "pymap.interfaces.token import TokensInterface from pymap.token import AllTokens from pymap.user import UserMetadata from .cleanup", "the `SELECT`_ command is called after successful user lookup. .. _SELECT: https://redis.io/commands/select \"\"\"", "@classmethod def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser: parser = subparsers.add_parser(name, help='redis", "data keys. This prefix does not apply to :attr:`.users_key`. 
\"\"\" return self._prefix @property", "super().__init__() self.config: Final = config self.tokens: Final = tokens self._redis: Optional[Redis] = redis", "AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface", "Optional[bytes] = None role: Optional[str] = None if credentials.authcid_type == 'admin-token': authcid =", "except ValueError: pass yield Session(redis, self.name, config, mailbox_set, filter_set) async def _get_namespace(self, redis:", "user lookup keys.\"\"\" return self._users_prefix @property def users_json(self) -> bool: \"\"\"True if the", "CleanupKeys, \\ NamespaceKeys from .mailbox import Message, MailboxSet from ..session import BaseSession __all__", "def __init__(self, login: Login, config: Config, status: HealthStatus) -> None: super().__init__() self._login =", "def config(self) -> Config: return self._config @property def status(self) -> HealthStatus: return self._status", "import BackendCapability, IMAPConfig from pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\", "= config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys", "if 'key' not in metadata.params: return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key)", "key contains a JSON object with a ``\"password\"`` attribute, instead of a redis", ".filter import FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from", ".keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import Message, MailboxSet", "json_data is None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict = await redis.hgetall(user_key,", "data keys. users_prefix: The user lookup key prefix. 
users_json: True if the user", "= await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] = None role: Optional[str] =", "@property def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return", "def login(self) -> Login: return self._login @property def config(self) -> Config: return self._config", "Mapping, AsyncIterator from contextlib import closing, asynccontextmanager, AsyncExitStack from datetime import datetime from", "self._select @property def separator(self) -> bytes: \"\"\"The bytestring used to separate segments of", "async def authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity: config = self._config redis =", "closing, asynccontextmanager, AsyncExitStack from datetime import datetime from functools import partial from secrets", "BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses", "for mail data. If given, the `SELECT`_ command is called after successful user", "Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self) -> BytesFormat: return", "credentials.identity role = 'admin' try: authcid_identity = Identity(config, self.tokens, redis, authcid) metadata =", "a redis hash with a ``password`` key. 
See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\"", "be called after new_session(), since it # may have called SELECT on the", "config(self) -> IMAPConfig: return self._config @property def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property", "-> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys: key =", "import CleanupTask from .filter import FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys,", "Session(BaseSession[Message]): \"\"\"The session implementation for the redis backend.\"\"\" resource = __name__ def __init__(self,", "TokensInterface: return self._tokens async def authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity: config =", "connect_redis) return cls(login, config, status), config @classmethod async def _connect_redis(cls, config: Config, status:", "args: Namespace, *, address: str, select: Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes,", "redis: Redis, owner: str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner)", "data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config,", "BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) -> str: \"\"\"The redis server address. 
Defaults", "= self.config redis = self.redis if self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot", "def delete(self) -> None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8')) if not await", "and metadata.role: raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if", "import BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that", "super().__init__() self._config = config self._connect_redis = connect_redis self._tokens = AllTokens() @property def tokens(self)", "yield Session(redis, self.name, config, mailbox_set, filter_set) async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys,", "name(self) -> str: return self._name @property def redis(self) -> Redis: redis = self._redis", "from contextlib import closing, asynccontextmanager, AsyncExitStack from datetime import datetime from functools import", "= login self._config = config self._status = status @property def login(self) -> Login:", "self._redis = redis self._config = config self._mailbox_set = mailbox_set self._filter_set = filter_set @property", "= await redis.get(user_key) if json_data is None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else:", "self._select = select self._separator = separator self._prefix = prefix self._users_prefix = users_prefix self._users_json", "from .cleanup import CleanupTask from .filter import FilterSet from .keys import DATA_VERSION, RedisKey,", "role: str = None) -> None: super().__init__() self.config: Final = config self.tokens: Final", "redis data structures for mailbox storage. 
\"\"\" def __init__(self, login: Login, config: Config,", "FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import", "= connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self, stack: AsyncExitStack) -> None: config", "async def init(cls, args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]: config =", "return redis async def start(self, stack: AsyncExitStack) -> None: config = self._config global_keys", "Config(IMAPConfig): \"\"\"The config implementation for the redis backend. Args: args: The command-line arguments.", "tokens: TokensInterface, redis: Redis, name: str, role: str = None) -> None: super().__init__()", "namespace = ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise IncompatibleData() return namespace async", "The command-line arguments. address: The redis server address. select: The redis database for", "-> str: return self._name @property def redis(self) -> Redis: redis = self._redis if", "login(self) -> Login: return self._login @property def config(self) -> Config: return self._config @property", "from __future__ import annotations import json import uuid from argparse import ArgumentParser, Namespace", "version, namespace = ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise IncompatibleData() return namespace", "self._config = config self._status = status @property def login(self) -> Login: return self._login", "from argparse import ArgumentParser, Namespace from collections.abc import Awaitable, Callable, Mapping, AsyncIterator from", "try: redis = await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack", "prefix: bytes, users_prefix: bytes, users_json: bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra)", "def authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity: config = 
self._config redis = await", "await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if 'key' in metadata.params: token_key =", "is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata) ->", "status @property def login(self) -> Login: return self._login @property def config(self) -> Config:", "data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata)", "raise IncompatibleData() return namespace async def get(self) -> UserMetadata: redis = self.redis user_bytes", "config(self) -> Config: return self._config @property def status(self) -> HealthStatus: return self._status @classmethod", "prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user", "token_key=token_key) if role != 'admin' and authcid != credentials.identity: raise AuthorizationFailure() return Identity(config,", "pass yield Session(redis, self.name, config, mailbox_set, filter_set) async def _get_namespace(self, redis: Redis, global_keys:", "if config.select is not None: await redis.select(config.select) global_keys = config._global_keys namespace = await", "This prefix does not apply to :attr:`.users_key`. \"\"\" return self._prefix @property def users_prefix(self)", "-> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login implementation for the redis backend.\"\"\"", "stack: AsyncExitStack) -> None: config = self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis,", "connection. raise RuntimeError() return redis async def new_token(self, *, expiration: datetime = None)", "= ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise IncompatibleData() return namespace async def", "mailbox storage. 
\"\"\" def __init__(self, login: Login, config: Config, status: HealthStatus) -> None:", "redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix',", "pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from", "-> HealthStatus: return self._status @classmethod def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser:", "default='/users', help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value contains", "Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login = Login(config, connect_redis)", "= subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB',", "import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token", "See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def select(self) -> Optional[int]: \"\"\"The redis", "NamespaceKeys from .mailbox import Message, MailboxSet from ..session import BaseSession __all__ = ['RedisBackend',", "bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace)", "bool: \"\"\"True if the value from the user lookup key contains a JSON", "ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set", "AsyncExitStack) -> None: config = self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis, config,", "= 
redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute() version,", "None) -> Optional[str]: metadata = await self.get() if 'key' not in metadata.params: return", "is None: # Other methods may not be called after new_session(), since it", "redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis database for mail data') parser.add_argument('--separator',", "for mailbox storage. \"\"\" def __init__(self, login: Login, config: Config, status: HealthStatus) ->", "self._login @property def config(self) -> Config: return self._config @property def status(self) -> HealthStatus:", "= users_json @property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def", "= address self._select = select self._separator = separator self._prefix = prefix self._users_prefix =", "database for mail data. separator: The redis key segment separator. 
prefix: The prefix", "parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail", "'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for the", "for the redis backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None:", "redis = self.redis self._redis = None if config.select is not None: await redis.select(config.select)", "@property def filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login implementation for", "multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute() version, namespace =", "NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data =", "from pymap.interfaces.token import TokensInterface from pymap.token import AllTokens from pymap.user import UserMetadata from", "hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property", "= CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for the redis backend.", "return self._tokens async def authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity: config = self._config", "MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass", "Config: return self._config @property def status(self) -> HealthStatus: return self._status @classmethod def 
add_subparser(cls,", "redis(self) -> Redis: redis = self._redis if redis is None: # Other methods", "select: The redis database for mail data. separator: The redis key segment separator.", "Identity(config, self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity implementation for the redis", "\"\"\"The prefix for user lookup keys.\"\"\" return self._users_prefix @property def users_json(self) -> bool:", "def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str) -> bytes: user_key = user.encode('utf-8')", "ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces,", "new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await", "See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self) -> BytesFormat:", "user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key,", "= None if config.select is not None: await redis.select(config.select) global_keys = config._global_keys namespace", "redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def", "redis: Redis, name: str, role: str = None) -> None: super().__init__() self.config: Final", "HealthStatus) -> None: super().__init__() self._login = login self._config = config self._status = status", "redis hash with a ``password`` key. 
See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return", "multi.execute() version, namespace = ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise IncompatibleData() return", "implementation for the redis backend.\"\"\" resource = __name__ def __init__(self, redis: Redis, owner:", "redis key segment separator. prefix: The prefix for mail data keys. users_prefix: The", "= separator self._prefix = prefix self._users_prefix = users_prefix self._users_json = users_json @property def", "\"\"\" return self._prefix @property def users_prefix(self) -> bytes: \"\"\"The prefix for user lookup", "redis = self._redis if redis is None: # Other methods may not be", "select: Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes, users_json: bool, **extra: Any) ->", "parser.add_argument('--select', metavar='DB', type=int, help='the redis database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the", "`SELECT`_ command is called after successful user lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return", "\\ -> Identity: config = self._config redis = await self._connect_redis() authcid = credentials.authcid", "for the redis backend. Args: args: The command-line arguments. 
address: The redis server", "filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login implementation for the redis", "FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login implementation for the redis backend.\"\"\" def", "Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config = config self._connect_redis = connect_redis self._tokens =", "def tokens(self) -> TokensInterface: return self._tokens async def authenticate(self, credentials: AuthenticationCredentials) \\ ->", "secrets import token_bytes from typing import Any, Optional, Final from aioredis import create_redis,", "args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session", "BytesFormat from pymap.config import BackendCapability, IMAPConfig from pymap.context import connection_exit from pymap.exceptions import", "self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] = None role: Optional[str] = None if", "-> None: super().__init__() self.config: Final = config self.tokens: Final = tokens self._redis: Optional[Redis]", "may have called SELECT on the connection. 
raise RuntimeError() return redis async def", "Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select = select self._separator", "keys.\"\"\" return self._separator @property def prefix(self) -> bytes: \"\"\"The prefix for mail data", "mailbox_set self._filter_set = filter_set @property def config(self) -> IMAPConfig: return self._config @property def", "# Other methods may not be called after new_session(), since it # may", "`redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property def _joiner(self) -> BytesFormat: return BytesFormat(self.separator)", "multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute() version, namespace = ns_val.split(b'/', 1) if", "The redis key segment separator. prefix: The prefix for mail data keys. users_prefix:", "data. separator: The redis key segment separator. prefix: The prefix for mail data", "functools import partial from secrets import token_bytes from typing import Any, Optional, Final", "@property def select(self) -> Optional[int]: \"\"\"The redis database for mail data. If given,", "key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL',", "and authcid != credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role) class", "-> Optional[int]: \"\"\"The redis database for mail data. 
If given, the `SELECT`_ command", "_joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix],", "self._role != 'admin' and metadata.role: raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict", "for user lookup keys.\"\"\" return self._users_prefix @property def users_json(self) -> bool: \"\"\"True if", "metadata = await self.get() if 'key' not in metadata.params: return None key =", "\\ NotAllowedError, UserNotFound from pymap.health import HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login", "Message, MailboxSet from ..session import BaseSession __all__ = ['RedisBackend', 'Config', 'Session'] class RedisBackend(BackendInterface):", "users_json @property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self)", "RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) -> Mapping[str, Any]:", "config.select is not None: await redis.select(config.select) global_keys = config._global_keys namespace = await self._get_namespace(redis,", "name self._role = role @property def name(self) -> str: return self._name @property def", "backend that uses redis data structures for mailbox storage. \"\"\" def __init__(self, login:", "the redis backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__()", "called SELECT on the connection. raise RuntimeError() return redis async def new_token(self, *,", "help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value contains JSON')", "-> str: \"\"\"The redis server address. Defaults to a connection to localhost. 
See", "bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config = self.config", "def status(self) -> HealthStatus: return self._status @classmethod def add_subparser(cls, name: str, subparsers: Any)", "= self._config redis = await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] = None", "import UserMetadata from .cleanup import CleanupTask from .filter import FilterSet from .keys import", "def filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The login implementation for the", "parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user", "await self.get() if 'key' not in metadata.params: return None key = bytes.fromhex(metadata.params['key']) return", "= status @property def login(self) -> Login: return self._login @property def config(self) ->", "Config, tokens: TokensInterface, redis: Redis, name: str, role: str = None) -> None:", "not apply to :attr:`.users_key`. 
\"\"\" return self._prefix @property def users_prefix(self) -> bytes: \"\"\"The", "connect_redis = partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The", "def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def", "await redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async", "config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else:", "RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner,", "global_keys: GlobalKeys, user: str) -> bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val", "return self._filter_set class Login(LoginInterface): \"\"\"The login implementation for the redis backend.\"\"\" def __init__(self,", "return RedisKey(self._joiner, [self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix],", "ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name, config, mailbox_set, filter_set)", "None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if", "str: return self._name @property def redis(self) -> Redis: redis = self._redis if redis", "from .mailbox import Message, MailboxSet from ..session import BaseSession __all__ = ['RedisBackend', 'Config',", "json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, 
user_dict)", "@property def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def filter_set(self) -> FilterSet: return", "called after successful user lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return self._select @property def", "authcid = credentials.identity role = 'admin' try: authcid_identity = Identity(config, self.tokens, redis, authcid)", "for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL',", "if role != 'admin' and authcid != credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens,", "async def get(self) -> UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8') user_key =", "if self.config.users_json: json_data = await redis.get(user_key) if json_data is None: raise UserNotFound(self.name) data_dict", "metavar='CHAR', default='/', help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data", "return self._name @property def redis(self) -> Redis: redis = self._redis if redis is", "**data_dict) async def set(self, metadata: UserMetadata) -> None: config = self.config redis =", "mailbox_set, filter_set) async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str) -> bytes:", "cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try:", "user: str) -> bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b'", "LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token import AllTokens from pymap.user import", "ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config import BackendCapability,", "key prefix') parser.add_argument('--users-prefix', 
metavar='VAL', default='/users', help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the", "to localhost. See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def select(self) -> Optional[int]:", "status) login = Login(config, connect_redis) return cls(login, config, status), config @classmethod async def", "self._separator @property def prefix(self) -> bytes: \"\"\"The prefix for mail data keys. This", "of a redis hash with a ``password`` key. See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_", "authcid) metadata = await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if 'key' in", "redis backend. Args: args: The command-line arguments. address: The redis server address. select:", "\"\"\"True if the value from the user lookup key contains a JSON object", "async def new_session(self) -> AsyncIterator[Session]: config = self.config redis = self.redis self._redis =", "{**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json}", "AllTokens() @property def tokens(self) -> TokensInterface: return self._tokens async def authenticate(self, credentials: AuthenticationCredentials)", "RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis data structures for mailbox storage. \"\"\"", "with a ``password`` key. 
See Also: `redis hashes <https://redis.io/topics/data-types-intro#redis-hashes>`_ \"\"\" return self._users_json @property", "Optional[str] = None if credentials.authcid_type == 'admin-token': authcid = credentials.identity role = 'admin'", "partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation", "select self._separator = separator self._prefix = prefix self._users_prefix = users_prefix self._users_json = users_json", "await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set =", "= b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key)", "-> Identity: config = self._config redis = await self._connect_redis() authcid = credentials.authcid token_key:", "stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self, stack: AsyncExitStack) -> None:", "not in metadata.params: return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async", "import closing, asynccontextmanager, AsyncExitStack from datetime import datetime from functools import partial from", "asynccontextmanager, AsyncExitStack from datetime import datetime from functools import partial from secrets import", "credentials.authcid token_key: Optional[bytes] = None role: Optional[str] = None if credentials.authcid_type == 'admin-token':", "-> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select = select self._separator =", "if the value from the user lookup key contains a JSON object with", "raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role) class 
Identity(IdentityInterface): \"\"\"The identity implementation", "def start(self, stack: AsyncExitStack) -> None: config = self._config global_keys = config._global_keys connect_redis", "name: str, role: str = None) -> None: super().__init__() self.config: Final = config", "self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis,", "= self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await", "class Session(BaseSession[Message]): \"\"\"The session implementation for the redis backend.\"\"\" resource = __name__ def", "if credentials.authcid_type == 'admin-token': authcid = credentials.identity role = 'admin' try: authcid_identity =", "return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {}) @property def", "user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi", "def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return RedisKey(self._joiner,", "-> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey: return RedisKey(self._joiner, [self.users_prefix], {})", "keys. users_prefix: The user lookup key prefix. users_json: True if the user lookup", "self._users_prefix @property def users_json(self) -> bool: \"\"\"True if the value from the user", "after successful user lookup. .. 
_SELECT: https://redis.io/commands/select \"\"\" return self._select @property def separator(self)", "'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation", "@property def users_prefix(self) -> bytes: \"\"\"The prefix for user lookup keys.\"\"\" return self._users_prefix", "global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for the redis backend. Args: args:", "uuid from argparse import ArgumentParser, Namespace from collections.abc import Awaitable, Callable, Mapping, AsyncIterator", "Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]: config = Config.from_args(args) status = HealthStatus()", "parse_args(cls, args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator':", "!= 'admin' and authcid != credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity,", "get(self) -> UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if", "mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup key prefix') parser.add_argument('--users-json',", "session implementation for the redis backend.\"\"\" resource = __name__ def __init__(self, redis: Redis,", "__init__(self, config: Config, tokens: TokensInterface, redis: Redis, name: str, role: str = None)", "prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value contains JSON') return parser @classmethod async", "bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select =", "\"\"\" return self._users_json @property 
def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self)", "await redis.select(config.select) global_keys = config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name) ns_keys =", "UserMetadata) -> None: config = self.config redis = self.redis if self._role != 'admin'", "status = HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login = Login(config, connect_redis) return", "redis backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface, redis: Redis, name: str, role:", "= Identity(config, self.tokens, redis, authcid) metadata = await authcid_identity.get() except UserNotFound: metadata =", "FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name, config, mailbox_set,", "config: Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config = config self._connect_redis =", "'key' not in metadata.params: return None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager", "prefix for mail data keys. users_prefix: The user lookup key prefix. 
users_json: True", "contains JSON') return parser @classmethod async def init(cls, args: Namespace, **overrides: Any) \\", "return self._login @property def config(self) -> Config: return self._config @property def status(self) ->", "config self.tokens: Final = tokens self._redis: Optional[Redis] = redis self._name = name self._role", "= await create_redis(config.address) except (ConnectionClosedError, OSError): status.set_unhealthy() raise else: status.set_healthy() stack = connection_exit.get()", "-> None: super().__init__(owner) self._redis = redis self._config = config self._mailbox_set = mailbox_set self._filter_set", "not None: await redis.select(config.select) global_keys = config._global_keys namespace = await self._get_namespace(redis, global_keys, self.name)", "delete(self) -> None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8')) if not await self.redis.delete(user_key):", "object_id=True, multi_append=True) @property def address(self) -> str: \"\"\"The redis server address. 
Defaults to", "'admin-token': authcid = credentials.identity role = 'admin' try: authcid_identity = Identity(config, self.tokens, redis,", "*, address: str, select: Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes, users_json: bool,", "self._users_json = users_json @property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property", "def parse_args(cls, args: Namespace) -> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select': args.select,", "return self._config @property def mailbox_set(self) -> MailboxSet: return self._mailbox_set @property def filter_set(self) ->", "backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address') parser.add_argument('--select', metavar='DB', type=int, help='the redis", "user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val = await multi.execute() version, namespace = ns_val.split(b'/',", "IMAPConfig from pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound", "\\ -> tuple[RedisBackend, Config]: config = Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis,", "'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The", "user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key) if", "self._mailbox_set = mailbox_set self._filter_set = filter_set @property def config(self) -> IMAPConfig: return self._config", "TokensInterface from pymap.token import AllTokens from pymap.user import UserMetadata from .cleanup import CleanupTask", "from the user 
lookup key contains a JSON object with a ``\"password\"`` attribute,", "def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) -> str:", "raise RuntimeError() return redis async def new_token(self, *, expiration: datetime = None) ->", "config = self.config redis = self.redis if self._role != 'admin' and metadata.role: raise", "-> Config: return self._config @property def status(self) -> HealthStatus: return self._status @classmethod def", "else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self, stack: AsyncExitStack)", "backend. Args: args: The command-line arguments. address: The redis server address. select: The", "Awaitable[Redis]]) -> None: super().__init__() self._config = config self._connect_redis = connect_redis self._tokens = AllTokens()", "new_session(self) -> AsyncIterator[Session]: config = self.config redis = self.redis self._redis = None if", "self.redis self._redis = None if config.select is not None: await redis.select(config.select) global_keys =", "typing import Any, Optional, Final from aioredis import create_redis, Redis, ConnectionClosedError from pysasl.creds", "BackendCapability, IMAPConfig from pymap.context import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError,", "Login(config, connect_redis) return cls(login, config, status), config @classmethod async def _connect_redis(cls, config: Config,", "= None) -> Optional[str]: metadata = await self.get() if 'key' not in metadata.params:", "server address. Defaults to a connection to localhost. 
See Also: :func:`aioredis.create_connection` \"\"\" return", "from secrets import token_bytes from typing import Any, Optional, Final from aioredis import", "separator self._prefix = prefix self._users_prefix = users_prefix self._users_json = users_json @property def backend_capability(self)", "AsyncIterator[Session]: config = self.config redis = self.redis self._redis = None if config.select is", "self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for the", "await redis.get(user_key) if json_data is None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict", "pymap.user import UserMetadata from .cleanup import CleanupTask from .filter import FilterSet from .keys", "users_prefix self._users_json = users_json @property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True)", "= await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if 'key' in metadata.params: token_key", "key prefix. users_json: True if the user lookup value contains JSON. \"\"\" def", "self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity implementation for the redis backend.\"\"\"", "= uuid.uuid4().hex.encode('ascii') ns_val = b'%d/%b' % (DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key,", "ArgumentParser: parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis server address')", "the user lookup key contains a JSON object with a ``\"password\"`` attribute, instead", "Identity(config, self.tokens, redis, authcid) metadata = await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config)", "the user lookup value contains JSON. 
\"\"\" def __init__(self, args: Namespace, *, address:", "\"\"\" def __init__(self, args: Namespace, *, address: str, select: Optional[int], separator: bytes, prefix:", "user_dict) await multi.execute() async def delete(self) -> None: config = self.config user_key =", "Final = config self.tokens: Final = tokens self._redis: Optional[Redis] = redis self._name =", "datetime import datetime from functools import partial from secrets import token_bytes from typing", "Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes, users_json: bool, **extra: Any) -> None:", "'Session'] class RedisBackend(BackendInterface): \"\"\"Defines a backend that uses redis data structures for mailbox", "Defaults to a connection to localhost. See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property", "apply to :attr:`.users_key`. \"\"\" return self._prefix @property def users_prefix(self) -> bytes: \"\"\"The prefix", "attribute, instead of a redis hash with a ``password`` key. 
See Also: `redis", "def users_prefix(self) -> bytes: \"\"\"The prefix for user lookup keys.\"\"\" return self._users_prefix @property", "**extra) self._address = address self._select = select self._separator = separator self._prefix = prefix", "self._redis = None if config.select is not None: await redis.select(config.select) global_keys = config._global_keys", "tokens(self) -> TokensInterface: return self._tokens async def authenticate(self, credentials: AuthenticationCredentials) \\ -> Identity:", "users_json: bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select", "redis, credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity implementation for the redis backend.\"\"\" def", "Final = tokens self._redis: Optional[Redis] = redis self._name = name self._role = role", "default='/', help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key", "redis keys.\"\"\" return self._separator @property def prefix(self) -> bytes: \"\"\"The prefix for mail", "start(self, stack: AsyncExitStack) -> None: config = self._config global_keys = config._global_keys connect_redis =", "import FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox", "__init__(self, args: Namespace, *, address: str, select: Optional[int], separator: bytes, prefix: bytes, users_prefix:", "= CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try: await", "the value from the user lookup key contains a JSON object with a", "self._redis if redis is None: # Other methods may not be called after", "UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata) -> None: config = self.config redis", "login implementation for the redis backend.\"\"\" def __init__(self, config: Config, connect_redis: Callable[[], 
Awaitable[Redis]])", "create_redis, Redis, ConnectionClosedError from pysasl.creds import AuthenticationCredentials from pymap.bytes import BytesFormat from pymap.config", "CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for the redis backend. Args:", "metadata = await authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if 'key' in metadata.params:", "pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token import AllTokens from", "a connection to localhost. See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def select(self)", "mail data keys. This prefix does not apply to :attr:`.users_key`. \"\"\" return self._prefix", "filter_set @property def config(self) -> IMAPConfig: return self._config @property def mailbox_set(self) -> MailboxSet:", "credentials.authcid_type == 'admin-token': authcid = credentials.identity role = 'admin' try: authcid_identity = Identity(config,", "def redis(self) -> Redis: redis = self._redis if redis is None: # Other", "filter_set: FilterSet) -> None: super().__init__(owner) self._redis = redis self._config = config self._mailbox_set =", "metavar='VAL', default='/users', help='the user lookup key prefix') parser.add_argument('--users-json', action='store_true', help='the user lookup value", "= HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login = Login(config, connect_redis) return cls(login,", "import connection_exit from pymap.exceptions import AuthorizationFailure, IncompatibleData, \\ NotAllowedError, UserNotFound from pymap.health import", "default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup key", "bytes, users_json: bool, **extra: Any) -> None: super().__init__(args, admin_key=token_bytes(), 
**extra) self._address = address", "self._separator = separator self._prefix = prefix self._users_prefix = users_prefix self._users_json = users_json @property", "https://redis.io/commands/select \"\"\" return self._select @property def separator(self) -> bytes: \"\"\"The bytestring used to", "self._users_json @property def _joiner(self) -> BytesFormat: return BytesFormat(self.separator) @property def _users_root(self) -> RedisKey:", "methods may not be called after new_session(), since it # may have called", "Other methods may not be called after new_session(), since it # may have", "import partial from secrets import token_bytes from typing import Any, Optional, Final from", "storage. \"\"\" def __init__(self, login: Login, config: Config, status: HealthStatus) -> None: super().__init__()", "args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for the redis backend.\"\"\" resource", "-> Mapping[str, Any]: return {**super().parse_args(args), 'address': args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'),", "FilterSet) -> None: super().__init__(owner) self._redis = redis self._config = config self._mailbox_set = mailbox_set", "data structures for mailbox storage. 
\"\"\" def __init__(self, login: Login, config: Config, status:", "'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for the redis backend.\"\"\" resource =", "async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str) -> bytes: user_key =", "ns_val.split(b'/', 1) if int(version) != DATA_VERSION: raise IncompatibleData() return namespace async def get(self)", "value contains JSON') return parser @classmethod async def init(cls, args: Namespace, **overrides: Any)", "cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for the redis", "(DATA_VERSION, new_namespace) multi = redis.multi_exec() multi.hsetnx(global_keys.namespaces, user_key, ns_val) multi.hget(global_keys.namespaces, user_key) _, ns_val =", "str, select: Optional[int], separator: bytes, prefix: bytes, users_prefix: bytes, users_json: bool, **extra: Any)", "return UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata) -> None: config = self.config", "GlobalKeys, user: str) -> bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii') ns_val =", "separator: bytes, prefix: bytes, users_prefix: bytes, users_json: bool, **extra: Any) -> None: super().__init__(args,", "-> UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json:", "DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys from .mailbox import Message, MailboxSet from ..session", "= metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data) else: multi =", "= AllTokens() @property def tokens(self) -> TokensInterface: return self._tokens async def authenticate(self, credentials:", "may not be called after new_session(), since it # may 
have called SELECT", "new_session(), since it # may have called SELECT on the connection. raise RuntimeError()", "args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]: config = Config.from_args(args) status =", "from pymap.bytes import BytesFormat from pymap.config import BackendCapability, IMAPConfig from pymap.context import connection_exit", "= __name__ def __init__(self, redis: Redis, owner: str, config: Config, mailbox_set: MailboxSet, filter_set:", "json_data = await redis.get(user_key) if json_data is None: raise UserNotFound(self.name) data_dict = json.loads(json_data)", "stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for the redis backend. Args: args: The", "help='the user lookup value contains JSON') return parser @classmethod async def init(cls, args:", "-> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls, args:", "-> Login: return self._login @property def config(self) -> Config: return self._config @property def", "= 'admin' try: authcid_identity = Identity(config, self.tokens, redis, authcid) metadata = await authcid_identity.get()", "= self.redis self._redis = None if config.select is not None: await redis.select(config.select) global_keys", "= await self._get_namespace(redis, global_keys, self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set", "str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis = redis", "resource = __name__ def __init__(self, redis: Redis, owner: str, config: Config, mailbox_set: MailboxSet,", "None key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]:", "raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self, metadata: UserMetadata) -> 
None: config", "given, the `SELECT`_ command is called after successful user lookup. .. _SELECT: https://redis.io/commands/select", "data_dict = json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict is None:", "multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await multi.execute() async def delete(self) -> None:", "encoding='utf-8') if data_dict is None: raise UserNotFound(self.name) return UserMetadata(self.config, **data_dict) async def set(self,", "data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the", "AsyncExitStack from datetime import datetime from functools import partial from secrets import token_bytes", "= Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login = Login(config,", "return self._select @property def separator(self) -> bytes: \"\"\"The bytestring used to separate segments", "self.name) ns_keys = NamespaceKeys(global_keys, namespace) cl_keys = CleanupKeys(global_keys) mailbox_set = MailboxSet(redis, ns_keys, cl_keys)", "global_keys = config._global_keys connect_redis = partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel)", "token_key = bytes.fromhex(metadata.params['key']) role = role or metadata.role await metadata.check_password(credentials, token_key=token_key) if role", "None role: Optional[str] = None if credentials.authcid_type == 'admin-token': authcid = credentials.identity role", "from .filter import FilterSet from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\ NamespaceKeys", "raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if 
self.config.users_json: json_data", "redis async def start(self, stack: AsyncExitStack) -> None: config = self._config global_keys =", "filter_set) async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str) -> bytes: user_key", "BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token import", "= config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict) await redis.set(user_key, json_data)", "import AllTokens from pymap.user import UserMetadata from .cleanup import CleanupTask from .filter import", "@classmethod async def init(cls, args: Namespace, **overrides: Any) \\ -> tuple[RedisBackend, Config]: config", "= self._redis if redis is None: # Other methods may not be called", "= json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise", "'admin' and metadata.role: raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex())", "__name__ def __init__(self, redis: Redis, owner: str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet)", "HealthStatus from pymap.interfaces.backend import BackendInterface from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import", "Redis, global_keys: GlobalKeys, user: str) -> bytes: user_key = user.encode('utf-8') new_namespace = uuid.uuid4().hex.encode('ascii')", "AllTokens from pymap.user import UserMetadata from .cleanup import CleanupTask from .filter import FilterSet", "= None role: Optional[str] = None if credentials.authcid_type == 'admin-token': authcid = credentials.identity", "from datetime import datetime from functools import partial from secrets import token_bytes from", "pymap.token 
import AllTokens from pymap.user import UserMetadata from .cleanup import CleanupTask from .filter", "a JSON object with a ``\"password\"`` attribute, instead of a redis hash with", "'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]): \"\"\"The session implementation for the redis backend.\"\"\"", "used to separate segments of composite redis keys.\"\"\" return self._separator @property def prefix(self)", "pymap.bytes import BytesFormat from pymap.config import BackendCapability, IMAPConfig from pymap.context import connection_exit from", "redis = await self._connect_redis() authcid = credentials.authcid token_key: Optional[bytes] = None role: Optional[str]", "since it # may have called SELECT on the connection. raise RuntimeError() return", "async def start(self, stack: AsyncExitStack) -> None: config = self._config global_keys = config._global_keys", "@property def tokens(self) -> TokensInterface: return self._tokens async def authenticate(self, credentials: AuthenticationCredentials) \\", "implementation for the redis backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface, redis: Redis,", "__init__(self, login: Login, config: Config, status: HealthStatus) -> None: super().__init__() self._login = login", "redis = self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data =", "= config self._mailbox_set = mailbox_set self._filter_set = filter_set @property def config(self) -> IMAPConfig:", "try: await mailbox_set.add_mailbox('INBOX') except ValueError: pass yield Session(redis, self.name, config, mailbox_set, filter_set) async", "= json.dumps(user_dict) await redis.set(user_key, json_data) else: multi = redis.multi_exec() multi.delete(user_key) multi.hmset_dict(user_key, user_dict) await", "Optional[str]: metadata = await self.get() if 'key' not in metadata.params: return None key", "help='the redis 
database for mail data') parser.add_argument('--separator', metavar='CHAR', default='/', help='the redis key segment", "self._redis: Optional[Redis] = redis self._name = name self._role = role @property def name(self)", "True if the user lookup value contains JSON. \"\"\" def __init__(self, args: Namespace,", "= mailbox_set self._filter_set = filter_set @property def config(self) -> IMAPConfig: return self._config @property", "authcid_identity.get() except UserNotFound: metadata = UserMetadata(config) if 'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key'])", "@property def separator(self) -> bytes: \"\"\"The bytestring used to separate segments of composite", "lookup key contains a JSON object with a ``\"password\"`` attribute, instead of a", "backend.\"\"\" resource = __name__ def __init__(self, redis: Redis, owner: str, config: Config, mailbox_set:", "with a ``\"password\"`` attribute, instead of a redis hash with a ``password`` key.", "None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8')) if not await self.redis.delete(user_key): raise UserNotFound(self.name)", "segment separator') parser.add_argument('--prefix', metavar='VAL', default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users',", "prefix: The prefix for mail data keys. 
users_prefix: The user lookup key prefix.", "= partial(self._connect_redis, config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config", "_SELECT: https://redis.io/commands/select \"\"\" return self._select @property def separator(self) -> bytes: \"\"\"The bytestring used", "= None) -> None: super().__init__() self.config: Final = config self.tokens: Final = tokens", "login: Login, config: Config, status: HealthStatus) -> None: super().__init__() self._login = login self._config", "new_token(self, *, expiration: datetime = None) -> Optional[str]: metadata = await self.get() if", "key = bytes.fromhex(metadata.params['key']) return self.tokens.get_login_token(self.name, key) @asynccontextmanager async def new_session(self) -> AsyncIterator[Session]: config", "lookup value contains JSON. \"\"\" def __init__(self, args: Namespace, *, address: str, select:", "from pymap.interfaces.login import LoginInterface, IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token import AllTokens", "None: config = self._config global_keys = config._global_keys connect_redis = partial(self._connect_redis, config, self._status) cleanup_task", "segment separator. prefix: The prefix for mail data keys. users_prefix: The user lookup", "Optional[int]: \"\"\"The redis database for mail data. If given, the `SELECT`_ command is", "UserMetadata(config) if 'key' in metadata.params: token_key = bytes.fromhex(metadata.params['key']) role = role or metadata.role", "key segment separator. prefix: The prefix for mail data keys. users_prefix: The user", "if the user lookup value contains JSON. \"\"\" def __init__(self, args: Namespace, *,", "redis database for mail data. 
If given, the `SELECT`_ command is called after", "= None if credentials.authcid_type == 'admin-token': authcid = credentials.identity role = 'admin' try:", "!= 'admin' and metadata.role: raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict =", "The redis database for mail data. separator: The redis key segment separator. prefix:", "\"\"\" return self._select @property def separator(self) -> bytes: \"\"\"The bytestring used to separate", "connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config = config self._connect_redis = connect_redis self._tokens", "not be called after new_session(), since it # may have called SELECT on", "assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json: json_data = json.dumps(user_dict)", "is called after successful user lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return self._select @property", "partial(cls._connect_redis, config, status) login = Login(config, connect_redis) return cls(login, config, status), config @classmethod", "role != 'admin' and authcid != credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis,", "self._config = config self._connect_redis = connect_redis self._tokens = AllTokens() @property def tokens(self) ->", "for the redis backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface, redis: Redis, name:", "async def delete(self) -> None: config = self.config user_key = config._users_root.end(self.name.encode('utf-8')) if not", "it # may have called SELECT on the connection. 
raise RuntimeError() return redis", "authcid != credentials.identity: raise AuthorizationFailure() return Identity(config, self.tokens, redis, credentials.identity, role) class Identity(IdentityInterface):", "IdentityInterface from pymap.interfaces.token import TokensInterface from pymap.token import AllTokens from pymap.user import UserMetadata", "segments of composite redis keys.\"\"\" return self._separator @property def prefix(self) -> bytes: \"\"\"The", "@property def users_json(self) -> bool: \"\"\"True if the value from the user lookup", "MailboxSet: return self._mailbox_set @property def filter_set(self) -> FilterSet: return self._filter_set class Login(LoginInterface): \"\"\"The", "Config, connect_redis: Callable[[], Awaitable[Redis]]) -> None: super().__init__() self._config = config self._connect_redis = connect_redis", "BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) -> str: \"\"\"The redis server", "-> BackendCapability: return BackendCapability(idle=True, object_id=True, multi_append=True) @property def address(self) -> str: \"\"\"The redis", "[self.prefix], {}) return GlobalKeys(key) @classmethod def parse_args(cls, args: Namespace) -> Mapping[str, Any]: return", "try: authcid_identity = Identity(config, self.tokens, redis, authcid) metadata = await authcid_identity.get() except UserNotFound:", "identity implementation for the redis backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface, redis:", "metadata.role: raise NotAllowedError('Cannot assign role.') user_key = config._users_root.end(self.name.encode('utf-8')) user_dict = metadata.to_dict(key=token_bytes().hex()) if self.config.users_json:", "keys.\"\"\" return self._users_prefix @property def users_json(self) -> bool: \"\"\"True if the value from", "token_key: Optional[bytes] = None role: Optional[str] = None if credentials.authcid_type == 'admin-token': authcid", "self._tokens async def authenticate(self, 
credentials: AuthenticationCredentials) \\ -> Identity: config = self._config redis", "command is called after successful user lookup. .. _SELECT: https://redis.io/commands/select \"\"\" return self._select", "\"\"\"The session implementation for the redis backend.\"\"\" resource = __name__ def __init__(self, redis:", "authcid = credentials.authcid token_key: Optional[bytes] = None role: Optional[str] = None if credentials.authcid_type", ".mailbox import Message, MailboxSet from ..session import BaseSession __all__ = ['RedisBackend', 'Config', 'Session']", "if json_data is None: raise UserNotFound(self.name) data_dict = json.loads(json_data) else: data_dict = await", "user lookup key prefix. users_json: True if the user lookup value contains JSON.", "= redis self._config = config self._mailbox_set = mailbox_set self._filter_set = filter_set @property def", "owner: str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None: super().__init__(owner) self._redis =", "def __init__(self, redis: Redis, owner: str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) ->", "None: super().__init__(args, admin_key=token_bytes(), **extra) self._address = address self._select = select self._separator = separator", "__init__(self, redis: Redis, owner: str, config: Config, mailbox_set: MailboxSet, filter_set: FilterSet) -> None:", "select(self) -> Optional[int]: \"\"\"The redis database for mail data. If given, the `SELECT`_", "authcid_identity = Identity(config, self.tokens, redis, authcid) metadata = await authcid_identity.get() except UserNotFound: metadata", "if int(version) != DATA_VERSION: raise IncompatibleData() return namespace async def get(self) -> UserMetadata:", "-> bytes: \"\"\"The prefix for mail data keys. 
This prefix does not apply", "Any) -> ArgumentParser: parser = subparsers.add_parser(name, help='redis backend') parser.add_argument('--address', metavar='URL', default='redis://localhost', help='the redis", "for the redis backend.\"\"\" resource = __name__ def __init__(self, redis: Redis, owner: str,", "\"\"\"The identity implementation for the redis backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface,", "UserMetadata: redis = self.redis user_bytes = self.name.encode('utf-8') user_key = self.config._users_root.end(user_bytes) if self.config.users_json: json_data", "args.address, 'select': args.select, 'separator': args.separator.encode('utf-8'), 'prefix': args.prefix.encode('utf-8'), 'users_prefix': args.users_prefix.encode('utf-8'), 'users_json': args.users_json} class Session(BaseSession[Message]):", "config, self._status) cleanup_task = CleanupTask(connect_redis, global_keys).start() stack.callback(cleanup_task.cancel) class Config(IMAPConfig): \"\"\"The config implementation for", "= MailboxSet(redis, ns_keys, cl_keys) filter_set = FilterSet(redis, ns_keys) try: await mailbox_set.add_mailbox('INBOX') except ValueError:", "DATA_VERSION: raise IncompatibleData() return namespace async def get(self) -> UserMetadata: redis = self.redis", "credentials.identity, role) class Identity(IdentityInterface): \"\"\"The identity implementation for the redis backend.\"\"\" def __init__(self,", "status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self, stack: AsyncExitStack) ->", "[self.users_prefix], {}) @property def _global_keys(self) -> GlobalKeys: key = RedisKey(self._joiner, [self.prefix], {}) return", "localhost. 
See Also: :func:`aioredis.create_connection` \"\"\" return self._address @property def select(self) -> Optional[int]: \"\"\"The", "RuntimeError() return redis async def new_token(self, *, expiration: datetime = None) -> Optional[str]:", "__future__ import annotations import json import uuid from argparse import ArgumentParser, Namespace from", "self.config.users_json: json_data = await redis.get(user_key) if json_data is None: raise UserNotFound(self.name) data_dict =", "backend.\"\"\" def __init__(self, config: Config, tokens: TokensInterface, redis: Redis, name: str, role: str", "self._role = role @property def name(self) -> str: return self._name @property def redis(self)", "= users_prefix self._users_json = users_json @property def backend_capability(self) -> BackendCapability: return BackendCapability(idle=True, object_id=True,", "role @property def name(self) -> str: return self._name @property def redis(self) -> Redis:", "value from the user lookup key contains a JSON object with a ``\"password\"``", "json.loads(json_data) else: data_dict = await redis.hgetall(user_key, encoding='utf-8') if data_dict is None: raise UserNotFound(self.name)", "Config]: config = Config.from_args(args) status = HealthStatus() connect_redis = partial(cls._connect_redis, config, status) login", "= self.config._users_root.end(user_bytes) if self.config.users_json: json_data = await redis.get(user_key) if json_data is None: raise", "self.name, config, mailbox_set, filter_set) async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys, user: str)", "of composite redis keys.\"\"\" return self._separator @property def prefix(self) -> bytes: \"\"\"The prefix", "= Login(config, connect_redis) return cls(login, config, status), config @classmethod async def _connect_redis(cls, config:", "metavar='VAL', default='/mail', help='the mail data key prefix') parser.add_argument('--users-prefix', metavar='VAL', default='/users', help='the user lookup", "status.set_unhealthy() raise 
else: status.set_healthy() stack = connection_exit.get() stack.enter_context(closing(redis)) return redis async def start(self," ]
[ "def process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero':", "'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short", "match_result._pattern_to_op.values() for op in ops: op_name_splits = op.name.split('/') if len(op_name_splits) < 2: return", "match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']]", "mmdnn.conversion.rewriter.rewriter import UnitRewriterBase import numpy as np import re class LSTMRewriter(UnitRewriterBase): def __init__(self,", "= match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] =", "w.shape[1]//4 [wx, wh] = np.split(w, [-1 * num_units]) input_size = wx.shape[0] kwargs['num_units'] =", "ops = match_result._pattern_to_op.values() for op in ops: op_name_splits = op.name.split('/') if len(op_name_splits) <", "else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return", "= w.shape[1]//4 [wx, wh] = np.split(w, [-1 * num_units]) input_size = wx.shape[0] kwargs['num_units']", "hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name): if pattern_name", "else: top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result)", "fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = 
fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if", "re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False else: return False return True def", "if len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name: if", "> 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False else: return False", "short pattern, to avoid match other pattern, check it's scope''' def check_match_scope(self, match_result,", "scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False", "top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value']", "UnitRewriterBase import numpy as np import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict):", "op_name_splits = op.name.split('/') if len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2])", "kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self,", "check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for op in ops: op_name_splits = op.name.split('/')", "False else: return False return True def run(self): return super(LSTMRewriter, self).run(['lstm_cell', 'h_zero'], 'tensorflow')", "kwargs def process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name ==", "super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs", "re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != 
scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3])", "return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value')", "scope_name: return False else: return False return True def run(self): return super(LSTMRewriter, self).run(['lstm_cell',", "numpy as np import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter,", "= match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else:", "= wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else:", "if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result)", "other pattern, check it's scope''' def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for", "[wx, wh] = np.split(w, [-1 * num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units", "np.split(w, [-1 * num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size", "'h_zero' not in match_result._name_to_pattern.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size =", "match_result._name_to_pattern.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value =", "wh] = np.split(w, [-1 * num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size']", 
"match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'):", "scope_name): ops = match_result._pattern_to_op.values() for op in ops: op_name_splits = op.name.split('/') if len(op_name_splits)", "for op in ops: op_name_splits = op.name.split('/') if len(op_name_splits) < 2: return False", "if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name): if", "import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def", "match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result,", "dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0]", "= num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs", "match_result._pattern_to_op.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w =", "pattern, check it's scope''' def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for op", "def process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return kwargs = dict() top_node", "'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e =", "ops: 
op_name_splits = op.name.split('/') if len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$', '',", "match_result, scope_name): ops = match_result._pattern_to_op.values() for op in ops: op_name_splits = op.name.split('/') if", "if 'h_zero' not in match_result._name_to_pattern.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size", "dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units =", "fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def", "= fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs", "= match_result._pattern_to_op.values() for op in ops: op_name_splits = op.name.split('/') if len(op_name_splits) < 2:", "weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys():", "kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs =", "'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not in", "check it's scope''' def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for op in", "return False else: return False return True def run(self): return super(LSTMRewriter, self).run(['lstm_cell', 'h_zero'],", "len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$', '', 
op_name_splits[-2]) != scope_name: if len(op_name_splits)", "op_name_splits[-3]) != scope_name: return False else: return False return True def run(self): return", "self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs =", "not in match_result._pattern_to_op.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\")", "'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid match other", "from mmdnn.conversion.rewriter.rewriter import UnitRewriterBase import numpy as np import re class LSTMRewriter(UnitRewriterBase): def", "if 'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e", "in match_result._name_to_pattern.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value", "fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs)", "self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern,", "if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False else: return False return True", "match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh]", "'''For some short pattern, to avoid match other pattern, 
check it's scope''' def", "match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs", "pattern, to avoid match other pattern, check it's scope''' def check_match_scope(self, match_result, scope_name):", "= np.split(w, [-1 * num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] =", "top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys():", "= self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh] = np.split(w, [-1 * num_units])", "!= scope_name: return False else: return False return True def run(self): return super(LSTMRewriter,", "num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def", "pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid", "input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs)", "= dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units", "self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid match other pattern, check it's scope'''", "top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif", "in ops: op_name_splits = op.name.split('/') if len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$',", "= 
match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx,", "'', op_name_splits[-2]) != scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) !=", "kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size']", "match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0]", "w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh] =", "np import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict)", "__init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell' not", "== 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some", "import numpy as np import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return", "'', op_name_splits[-3]) != scope_name: return False else: return False return True def run(self):", "re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self,", "= kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return 
kwargs =", "== 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid match", "= match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh] = np.split(w,", "it's scope''' def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for op in ops:", "top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell':", "= fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self, match_result,", "kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return kwargs = dict()", "top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return kwargs", "def process_lstm_cell(self, match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs = dict() top_node", "process_lstm_cell(self, match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs = dict() top_node =", "return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read',", "import UnitRewriterBase import numpy as np import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph,", "avoid match other pattern, check it's scope''' def check_match_scope(self, match_result, scope_name): ops =", "top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4", "not in 
match_result._name_to_pattern.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size')", "to avoid match other pattern, check it's scope''' def check_match_scope(self, match_result, scope_name): ops", "2: return False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name: if len(op_name_splits) > 2:", "!= scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return", "num_units = w.shape[1]//4 [wx, wh] = np.split(w, [-1 * num_units]) input_size = wx.shape[0]", "'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name): if pattern_name ==", "kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')]", "def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for op in ops: op_name_splits =", "= dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']] fill_size = match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] =", "input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if", "return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys(): return", "if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if 'h_zero'", "some short pattern, to avoid match other pattern, check it's scope''' def check_match_scope(self,", "weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell' not in 
match_result._pattern_to_op.keys(): return kwargs = dict()", "* num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size if hasattr(top_node,", "class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result):", "self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh] = np.split(w, [-1 * num_units]) input_size", "hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result): if 'h_zero' not", "[-1 * num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size if", "process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if", "if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '',", "len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False else: return", "False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$',", "self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid match other pattern, check", "elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to", "match_result): if 'lstm_cell' not in match_result._pattern_to_op.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']]", "op in ops: op_name_splits = op.name.split('/') if len(op_name_splits) < 2: return False if", "graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def 
process_lstm_cell(self, match_result): if 'lstm_cell' not in", "as np import re class LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph,", "pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For", "if self.check_match_scope(match_result, 'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid match other pattern,", "return False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name: if len(op_name_splits) > 2: if", "wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs", "if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False else:", "num_units]) input_size = wx.shape[0] kwargs['num_units'] = num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'):", "LSTMRewriter(UnitRewriterBase): def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if", "= kwargs def process_match_result(self, match_result, pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name", "scope''' def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values() for op in ops: op_name_splits", "match other pattern, check it's scope''' def check_match_scope(self, match_result, scope_name): ops = match_result._pattern_to_op.values()", "= op.name.split('/') if len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) !=", "w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh] = np.split(w, [-1 *", "< 2: return False if 
re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name: if len(op_name_splits) >", "2: if re.sub(r'(_\\d+)*$', '', op_name_splits[-3]) != scope_name: return False else: return False return", "process_rnn_h_zero(self, match_result): if 'h_zero' not in match_result._name_to_pattern.keys(): return kwargs = dict() top_node =", "fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self, match_result, pattern_name):", "'LSTMCellZeroState'): self.process_rnn_h_zero(match_result) '''For some short pattern, to avoid match other pattern, check it's", "kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_match_result(self,", "in match_result._pattern_to_op.keys(): return kwargs = dict() top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']] w_e = match_result.get_op(\"cell_kernel\") w", "match_result.get_op(\"cell_kernel\") w = self._weights_dict[w_e.name.replace('/read', '')] num_units = w.shape[1]//4 [wx, wh] = np.split(w, [-1", "pattern_name): if pattern_name == 'lstm_cell': self.process_lstm_cell(match_result) elif pattern_name == 'h_zero': if self.check_match_scope(match_result, 'LSTMCellZeroState'):", "def __init__(self, graph, weights_dict): return super(LSTMRewriter, self).__init__(graph, weights_dict) def process_lstm_cell(self, match_result): if 'lstm_cell'", "= match_result.get_op('fill_size') fill_value = match_result.get_op('fill_value') kwargs['fill_size'] = fill_size.get_attr('value').int_val[0] kwargs['fill_value'] = fill_value.get_attr('value').float_val[0] if hasattr(top_node,", "op.name.split('/') if len(op_name_splits) < 2: return False if re.sub(r'(_\\d+)*$', '', op_name_splits[-2]) != scope_name:", "op_name_splits[-2]) != scope_name: if len(op_name_splits) > 2: if re.sub(r'(_\\d+)*$', '', 
op_name_splits[-3]) != scope_name:", "kwargs['num_units'] = num_units kwargs['input_size'] = input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs =", "'')] num_units = w.shape[1]//4 [wx, wh] = np.split(w, [-1 * num_units]) input_size =", "= input_size if hasattr(top_node, 'kwargs'): top_node.kwargs.update(kwargs) else: top_node.kwargs = kwargs def process_rnn_h_zero(self, match_result):" ]
[ "from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields =", "<reponame>forgeno/ad-distribution<filename>adDistro/api/serializers.py from rest_framework import serializers from . import models from django.contrib.auth.models import User", "= User fields = ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model", "User fields = ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model =", "serializers from . import models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta:", "class Meta: model = User fields = ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer):", "import serializers from . import models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class", "class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'email', 'password', 'id')", "User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'email', 'password',", "rest_framework import serializers from . import models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer):", "('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model = models.EbayUser fields =", "from . import models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model", "fields = ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model = models.EbayUser", ". 
import models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model =", "from rest_framework import serializers from . import models from django.contrib.auth.models import User class", "CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'email', 'password', 'id') class", "'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model = models.EbayUser fields = ('AdDistroId', 'EbayUsername',", "model = User fields = ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta:", "Meta: model = User fields = ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class", "'id') class EbayUserSerializer(serializers.Serializer): class Meta: model = models.EbayUser fields = ('AdDistroId', 'EbayUsername', 'EbayPassword')", "'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model = models.EbayUser fields = ('AdDistroId',", "= ('username', 'email', 'password', 'id') class EbayUserSerializer(serializers.Serializer): class Meta: model = models.EbayUser fields", "models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields", "django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username',", "import models from django.contrib.auth.models import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User", "import User class CurrentUserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'email'," ]
[ "preorder[i] < root and breakpoint > 0: return False if breakpoint < 0:", "and breakpoint > 0: return False if breakpoint < 0: breakpoint = len(preorder)", "List[int] :rtype: bool \"\"\" if len(preorder) < 2: return True root = preorder[0]", "preorder): \"\"\" :type preorder: List[int] :rtype: bool \"\"\" if len(preorder) < 2: return", "< 2: return True root = preorder[0] breakpoint = -1 for i in", "len(preorder) < 2: return True root = preorder[0] breakpoint = -1 for i", "\"\"\" :type preorder: List[int] :rtype: bool \"\"\" if len(preorder) < 2: return True", "len(preorder)): if preorder[i] > root: if breakpoint < 0: breakpoint = i if", "0: breakpoint = i if preorder[i] < root and breakpoint > 0: return", "root: if breakpoint < 0: breakpoint = i if preorder[i] < root and", "class Solution(object): def verifyPreorder(self, preorder): \"\"\" :type preorder: List[int] :rtype: bool \"\"\" if", "return False if breakpoint < 0: breakpoint = len(preorder) return self.verifyPreorder(preorder[1:breakpoint]) and self.verifyPreorder(", "root = preorder[0] breakpoint = -1 for i in range(1, len(preorder)): if preorder[i]", "-1 for i in range(1, len(preorder)): if preorder[i] > root: if breakpoint <", "in range(1, len(preorder)): if preorder[i] > root: if breakpoint < 0: breakpoint =", "= -1 for i in range(1, len(preorder)): if preorder[i] > root: if breakpoint", "Solution(object): def verifyPreorder(self, preorder): \"\"\" :type preorder: List[int] :rtype: bool \"\"\" if len(preorder)", "0: return False if breakpoint < 0: breakpoint = len(preorder) return self.verifyPreorder(preorder[1:breakpoint]) and", "preorder: List[int] :rtype: bool \"\"\" if len(preorder) < 2: return True root =", "range(1, len(preorder)): if preorder[i] > root: if breakpoint < 0: breakpoint = i", "False if breakpoint < 0: breakpoint = len(preorder) return self.verifyPreorder(preorder[1:breakpoint]) and self.verifyPreorder( preorder[breakpoint:]", "i if preorder[i] < root and breakpoint > 0: return 
False if breakpoint", "breakpoint = i if preorder[i] < root and breakpoint > 0: return False", "= preorder[0] breakpoint = -1 for i in range(1, len(preorder)): if preorder[i] >", "i in range(1, len(preorder)): if preorder[i] > root: if breakpoint < 0: breakpoint", ":rtype: bool \"\"\" if len(preorder) < 2: return True root = preorder[0] breakpoint", "< 0: breakpoint = i if preorder[i] < root and breakpoint > 0:", "preorder[i] > root: if breakpoint < 0: breakpoint = i if preorder[i] <", "= i if preorder[i] < root and breakpoint > 0: return False if", "if preorder[i] > root: if breakpoint < 0: breakpoint = i if preorder[i]", "\"\"\" if len(preorder) < 2: return True root = preorder[0] breakpoint = -1", "< root and breakpoint > 0: return False if breakpoint < 0: breakpoint", "for i in range(1, len(preorder)): if preorder[i] > root: if breakpoint < 0:", "preorder[0] breakpoint = -1 for i in range(1, len(preorder)): if preorder[i] > root:", "if len(preorder) < 2: return True root = preorder[0] breakpoint = -1 for", "True root = preorder[0] breakpoint = -1 for i in range(1, len(preorder)): if", "def verifyPreorder(self, preorder): \"\"\" :type preorder: List[int] :rtype: bool \"\"\" if len(preorder) <", "if breakpoint < 0: breakpoint = i if preorder[i] < root and breakpoint", "2: return True root = preorder[0] breakpoint = -1 for i in range(1,", "if preorder[i] < root and breakpoint > 0: return False if breakpoint <", ":type preorder: List[int] :rtype: bool \"\"\" if len(preorder) < 2: return True root", "> 0: return False if breakpoint < 0: breakpoint = len(preorder) return self.verifyPreorder(preorder[1:breakpoint])", "return True root = preorder[0] breakpoint = -1 for i in range(1, len(preorder)):", "breakpoint = -1 for i in range(1, len(preorder)): if preorder[i] > root: if", "> root: if breakpoint < 0: breakpoint = i if preorder[i] < root", "root and breakpoint > 0: return False if breakpoint < 0: breakpoint =", "breakpoint < 0: breakpoint = i if preorder[i] < root 
and breakpoint >", "breakpoint > 0: return False if breakpoint < 0: breakpoint = len(preorder) return", "if breakpoint < 0: breakpoint = len(preorder) return self.verifyPreorder(preorder[1:breakpoint]) and self.verifyPreorder( preorder[breakpoint:] )", "bool \"\"\" if len(preorder) < 2: return True root = preorder[0] breakpoint =", "verifyPreorder(self, preorder): \"\"\" :type preorder: List[int] :rtype: bool \"\"\" if len(preorder) < 2:" ]
[ "Sun from poliastro.twobody import Orbit from tqdm.auto import tqdm import os from space_mission_design.power", "from space_mission_design.visualisation import ploting_map import numpy as np import matplotlib.pyplot as plt plt.style.use(\"presentation\")", "as u from poliastro.bodies import Earth, Mars, Sun from poliastro.twobody import Orbit from", "tqdm.auto import tqdm import os from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\")", "as plt plt.style.use(\"presentation\") from astropy import units as u from poliastro.bodies import Earth,", "import units as u from poliastro.bodies import Earth, Mars, Sun from poliastro.twobody import", "import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import units as u from poliastro.bodies", "import numpy as np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import units", "astropy import units as u from poliastro.bodies import Earth, Mars, Sun from poliastro.twobody", "\"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position,", "import os from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example :", "os from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", 
celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate", "celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\":", "\"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses,", "poliastro.twobody import Orbit from tqdm.auto import tqdm import os from space_mission_design.power import body_illumination", "plt plt.style.use(\"presentation\") from astropy import units as u from poliastro.bodies import Earth, Mars,", "plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value,", "} wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\") ploting_map.plot_planisphere(ecf_position) ploting_map.plot_poles(ecf_position) plt.show() #", "* u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\") ploting_map.plot_planisphere(ecf_position)", "matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import units as u from poliastro.bodies import", "import Earth, Mars, Sun from poliastro.twobody import Orbit from tqdm.auto import tqdm import", "celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\") 
specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180,", "(Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date", "body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\") specitic_params =", "import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\") specitic_params", "numpy as np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import units as", ": propagate and plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean +", "Earth, Mars, Sun from poliastro.twobody import Orbit from tqdm.auto import tqdm import os", "celestlab_wrapper from space_mission_design.visualisation import ploting_map import numpy as np import matplotlib.pyplot as plt", "tqdm import os from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example", "51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position,", "from space_mission_design.celestlab import celestlab_wrapper from 
space_mission_design.visualisation import ploting_map import numpy as np import", "u from poliastro.bodies import Earth, Mars, Sun from poliastro.twobody import Orbit from tqdm.auto", "from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and", "import celestlab_wrapper from space_mission_design.visualisation import ploting_map import numpy as np import matplotlib.pyplot as", "from astropy import units as u from poliastro.bodies import Earth, Mars, Sun from", "space_mission_design from space_mission_design.celestlab import celestlab_wrapper from space_mission_design.visualisation import ploting_map import numpy as np", "from poliastro.bodies import Earth, Mars, Sun from poliastro.twobody import Orbit from tqdm.auto import", "u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\") ploting_map.plot_planisphere(ecf_position) ploting_map.plot_poles(ecf_position)", "\"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\")", "np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import units as u from", "wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\") ploting_map.plot_planisphere(ecf_position) ploting_map.plot_poles(ecf_position) plt.show() # plt.savefig(\"map_51deg.png\",", 
"space_mission_design.celestlab import celestlab_wrapper from space_mission_design.visualisation import ploting_map import numpy as np import matplotlib.pyplot", "units as u from poliastro.bodies import Earth, Mars, Sun from poliastro.twobody import Orbit", "{\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params)", "space_mission_design.visualisation import ploting_map import numpy as np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from", "from tqdm.auto import tqdm import os from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\",", "= celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\") specitic_params = {\"year\":2024, \"hour\":12,", "import ploting_map import numpy as np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy", "Mars, Sun from poliastro.twobody import Orbit from tqdm.auto import tqdm import os from", "example : propagate and plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean", "wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\") ploting_map.plot_planisphere(ecf_position) ploting_map.plot_poles(ecf_position) plt.show() # plt.savefig(\"map_51deg.png\", dpi=300)", "ploting_map import numpy as np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import", "space_mission_design.power import body_illumination wrapper = 
celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\")", "Orbit from tqdm.auto import tqdm import os from space_mission_design.power import body_illumination wrapper =", "print(\"Small example : propagate and plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\":", "and plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 *", "import Orbit from tqdm.auto import tqdm import os from space_mission_design.power import body_illumination wrapper", "propagate and plot\") specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300", "import tqdm import os from space_mission_design.power import body_illumination wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small", "from poliastro.twobody import Orbit from tqdm.auto import tqdm import os from space_mission_design.power import", "\"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\") ploting_map.plot_planisphere(ecf_position) ploting_map.plot_poles(ecf_position) plt.show()", "= {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" }", "poliastro.bodies import Earth, Mars, Sun from poliastro.twobody import Orbit from tqdm.auto import tqdm", "wrapper = 
celestlab_wrapper.WrapperCelestlab(scilab_path=\"/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/\", celestlab_loader=\"/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce\") print(\"Small example : propagate and plot\") specitic_params = {\"year\":2024,", "+ 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date =", "plt.style.use(\"presentation\") from astropy import units as u from poliastro.bodies import Earth, Mars, Sun", "import space_mission_design from space_mission_design.celestlab import celestlab_wrapper from space_mission_design.visualisation import ploting_map import numpy as", "specitic_params = {\"year\":2024, \"hour\":12, \"inc\": 51*np.pi/180, \"sma\": (Earth.R_mean + 300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\"", "as np import matplotlib.pyplot as plt plt.style.use(\"presentation\") from astropy import units as u", "300 * u.km).to(u.m).value, \"outputFileName\":\"results_ionsat.h5\" } wrapper.write_paramerter_file(specitic_params) wrapper.launch_celestlab(\"ionsat_power.sce\") sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results(\"results_ionsat.h5\")" ]
[ "response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A()", "== 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1)", "Controller ctr = Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response", "ctr.h() response = input(\"Restart(y/n): \") while response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A()", "import Controller ctr = Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h()", "ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while response == 'y': ctr.X() ctr.A()", "= input(\"Restart(y/n): \") while response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A()", "ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3)", "ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while response == 'y': ctr.X() ctr.A() ctr.pause(3)", "from NXController import Controller ctr = Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A()", "'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A()", "ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while response == 'y': ctr.X()", "\") while response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A()", "Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \")", "ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1)", "ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n):", "ctr.A() ctr.pause(1) 
ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while response", "ctr = Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response =", "ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h()", "ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while response ==", "ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response", "NXController import Controller ctr = Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3)", "ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A()", "= Controller() ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n):", "ctr.A() ctr.pause(15) ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response =", "ctr.LS() ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while", "ctr.A() ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \")", "ctr.pause(7) ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") ctr.A()", "ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = input(\"Restart(y/n): \") while response == 'y':", "response = input(\"Restart(y/n): \") while response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1)", "while response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15) ctr.A() ctr.pause(7)", "ctr.A() ctr.pause(1) ctr.A() ctr.pause(1) ctr.A() ctr.pause(0.3) ctr.h() response = 
input(\"Restart(y/n): \") ctr.A() ctr.close()", "input(\"Restart(y/n): \") while response == 'y': ctr.X() ctr.A() ctr.pause(3) ctr.A() ctr.pause(1) ctr.A() ctr.pause(15)" ]
[ "Create the agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully trained\")", "(C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt file in the", "information. # from gym_unity.envs import UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies import", "same folder than this Python script. # MacOS: Copy the Unity binary to", "import PPO2 from stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json and the Unity", "gym_unity.envs import UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy # Linux:", "the same folder than this Python script. # MacOS: Copy the Unity binary", "+ file_name num_episodes = 500 class StableBasGym: @staticmethod def run(): # LINUX: Disable", "the EnvBuild folder. file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes =", "PPO2 from stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json and the Unity binary", "class StableBasGym: @staticmethod def run(): # LINUX: Disable the Unity window -> no_graphics=True", "StableBasGym: @staticmethod def run(): # LINUX: Disable the Unity window -> no_graphics=True env", "# # See LICENSE.txt file in the root directory # of this source", "The env_config.json and the Unity binary must be on the same folder than", "Unity binary must be on the same folder than this Python script. #", "def run(): # LINUX: Disable the Unity window -> no_graphics=True env = UnityEnv(env_name,", "window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create", "Reserved. # # See LICENSE.txt file in the root directory # of this", "the Unity binary to the EnvBuild folder. file_name = 'DroneDelivery' env_name = \"../EnvBuild/\"", "All Rights Reserved. 
# # See LICENSE.txt file in the root directory #", "= 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes = 500 class StableBasGym: @staticmethod", "MacOS: Copy the Unity binary to the EnvBuild folder. file_name = 'DroneDelivery' env_name", "# # Copyright (C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt", "in the root directory # of this source tree for licensing information. #", "# Linux: The env_config.json and the Unity binary must be on the same", "no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent", "# Create the agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully", "model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully trained\") if __name__ ==", "binary to the EnvBuild folder. file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name", "num_episodes = 500 class StableBasGym: @staticmethod def run(): # LINUX: Disable the Unity", "500 class StableBasGym: @staticmethod def run(): # LINUX: Disable the Unity window ->", "file_name num_episodes = 500 class StableBasGym: @staticmethod def run(): # LINUX: Disable the", "script. # MacOS: Copy the Unity binary to the EnvBuild folder. file_name =", "directory # of this source tree for licensing information. # from gym_unity.envs import", "to the EnvBuild folder. 
file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes", "# LINUX: Disable the Unity window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False,", "stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json and the", "no_graphics=False) # Create the agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close()", "must be on the same folder than this Python script. # MacOS: Copy", "from stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json and", "= 500 class StableBasGym: @staticmethod def run(): # LINUX: Disable the Unity window", "# from gym_unity.envs import UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy", "# Copyright (C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt file", "LINUX: Disable the Unity window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False,", "file in the root directory # of this source tree for licensing information.", "and the Unity binary must be on the same folder than this Python", "folder than this Python script. # MacOS: Copy the Unity binary to the", "root directory # of this source tree for licensing information. # from gym_unity.envs", "allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes)", "than this Python script. # MacOS: Copy the Unity binary to the EnvBuild", "\"../EnvBuild/\" + file_name num_episodes = 500 class StableBasGym: @staticmethod def run(): # LINUX:", "licensing information. # from gym_unity.envs import UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies", "this source tree for licensing information. 
# from gym_unity.envs import UnityEnv from stable_baselines", "LICENSE.txt file in the root directory # of this source tree for licensing", "source tree for licensing information. # from gym_unity.envs import UnityEnv from stable_baselines import", "the root directory # of this source tree for licensing information. # from", "folder. file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes = 500 class", "be on the same folder than this Python script. # MacOS: Copy the", "'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes = 500 class StableBasGym: @staticmethod def", "binary must be on the same folder than this Python script. # MacOS:", "the agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully trained\") if", "the Unity window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False)", "-> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the", "import MlpPolicy # Linux: The env_config.json and the Unity binary must be on", "this Python script. # MacOS: Copy the Unity binary to the EnvBuild folder.", "See LICENSE.txt file in the root directory # of this source tree for", "from stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json and the Unity binary must", "# MacOS: Copy the Unity binary to the EnvBuild folder. file_name = 'DroneDelivery'", "agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully trained\") if __name__", "Python script. # MacOS: Copy the Unity binary to the EnvBuild folder. 
file_name", "Disable the Unity window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False,", "import UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy # Linux: The", "worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model = PPO2(MlpPolicy, env,", "tree for licensing information. # from gym_unity.envs import UnityEnv from stable_baselines import PPO2", "Unity binary to the EnvBuild folder. file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" +", "= \"../EnvBuild/\" + file_name num_episodes = 500 class StableBasGym: @staticmethod def run(): #", "<reponame>IBM/vsrl-examples # # Copyright (C) 2020 IBM. All Rights Reserved. # # See", "Copy the Unity binary to the EnvBuild folder. file_name = 'DroneDelivery' env_name =", "EnvBuild folder. file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes = 500", "= PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully trained\") if __name__ == '__main__':", "= UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model =", "env_config.json and the Unity binary must be on the same folder than this", "uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4)", "run(): # LINUX: Disable the Unity window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000,", "MlpPolicy # Linux: The env_config.json and the Unity binary must be on the", "Copyright (C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt file in", "@staticmethod def run(): # LINUX: Disable the Unity window -> no_graphics=True env =", "Rights Reserved. 
# # See LICENSE.txt file in the root directory # of", "# of this source tree for licensing information. # from gym_unity.envs import UnityEnv", "UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json", "UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model = PPO2(MlpPolicy,", "from gym_unity.envs import UnityEnv from stable_baselines import PPO2 from stable_baselines.common.policies import MlpPolicy #", "# See LICENSE.txt file in the root directory # of this source tree", "use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model = PPO2(MlpPolicy, env, verbose=0,", "env_name = \"../EnvBuild/\" + file_name num_episodes = 500 class StableBasGym: @staticmethod def run():", "Linux: The env_config.json and the Unity binary must be on the same folder", "on the same folder than this Python script. # MacOS: Copy the Unity", "PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4) model.learn(total_timesteps=num_episodes) env.close() print(\"Successfully trained\") if __name__ == '__main__': StableBasGym().run()", "of this source tree for licensing information. # from gym_unity.envs import UnityEnv from", "IBM. All Rights Reserved. # # See LICENSE.txt file in the root directory", "2020 IBM. All Rights Reserved. # # See LICENSE.txt file in the root", "the Unity binary must be on the same folder than this Python script.", "for licensing information. 
# from gym_unity.envs import UnityEnv from stable_baselines import PPO2 from", "env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) # Create the agent model", "stable_baselines.common.policies import MlpPolicy # Linux: The env_config.json and the Unity binary must be", "Unity window -> no_graphics=True env = UnityEnv(env_name, worker_id=1000, use_visual=False, uint8_visual=False, allow_multiple_visual_obs=False, no_graphics=False) #", "file_name = 'DroneDelivery' env_name = \"../EnvBuild/\" + file_name num_episodes = 500 class StableBasGym:" ]
[ "is fixed assert param.multi is multi param = PDFParameter(initial=initial, limits=limits) assert param.initial ==", "results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results assert len(parameters) == 4", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameter_names ==", "== 1 assert len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),", "np.array([[0, 1]])) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations", "== 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5)", "= PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial == initial assert param.limits == limits", "pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]]))", "limits assert param.fixed is False assert param.multi is False def test_pdf_class(): parameters =", "dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters)", "assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters", "is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters =", "np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", 
\"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]],", "normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters) ==", "= np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\",", "limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results", "assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3)", "assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is", "False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), )", ") pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial ==", "limits assert param.fixed is fixed assert param.multi is multi param = PDFParameter(initial=initial, limits=limits)", "assert pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0,", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values()))", "param.multi is False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", ") pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.function", "is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def 
test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x", "0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial", "sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "fixed=fixed, multi=multi) assert param.initial == initial assert param.limits == limits assert param.fixed is", "from numpy.testing import assert_allclose import pytest def test_pdf_parameter(): initial = 1 limits =", "= PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results assert len(parameters) == 4 assert", "0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] ==", "iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial ==", "(0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"]", "normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial,", "1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True)", "= dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2,", "pdf = PDF(1, normal_pdf, parameters) assert 
pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits ==", "1) def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), )", "sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1) parameters,", "assert len(parameters) == 2 assert len(is_multi) == 2 assert len(lookup) == 1 assert", "multi = True param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial == initial", "== 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array", "PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def", "def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0,", "pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] ==", "test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1", "0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList 
@pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf", "pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def", "parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert", "rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__ == \"SiPMGentile\" with pytest.raises(ValueError):", "sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters = dict(", "pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError):", "2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"]", "# noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits ==", "is_multi, lookup = results assert len(parameters) == 2 assert len(is_multi) == 2 assert", "limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters,", "pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial", "PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) 
x = np.linspace(-5, 100, 1000) y", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) x = np.linspace(-1, 6, 100)", "assert param.multi is False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0,", "0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]),", "PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters", "3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters =", "dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters,", "pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2],", "2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is", "test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())),", "normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names():", "2 assert len(lookup) == 2 assert len(lookup[0]) == 2 def test_initial(): parameters =", "param.multi is multi param = PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert param.limits", "1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "assert 
len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert", "assert len(lookup) == 2 assert len(lookup[0]) == 2 def test_initial(): parameters = dict(", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"]", ") pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs", "normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1", "noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits)", "import pytest def test_pdf_parameter(): initial = 1 limits = (0, 4) fixed =", "is False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)),", "parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1,", "PDF(2, normal_pdf, parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters =", "assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"])", "0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with", "= PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 1 assert", "mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 2)", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), 
multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial", "pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4))", "== 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0,", "limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert", "initial assert param.limits == limits assert param.fixed is False assert param.multi is False", "iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2)", "[2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] =", "is_multi, lookup = results assert len(parameters) == 4 assert len(is_multi) == 2 assert", "= dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results =", "numpy.testing import assert_allclose import pytest def test_pdf_parameter(): initial = 1 limits = (0,", "assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial", "== (2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed():", "pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3)", "assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf, parameters)", "PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) 
np.testing.assert_allclose(np.trapz(y, x),", "initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3]))", "np from numpy.testing import assert_allclose import pytest def test_pdf_parameter(): initial = 1 limits", "assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with", "== 9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] ==", "assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial", "def test_pdf_parameter(): initial = 1 limits = (0, 4) fixed = True multi", "\"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "assert param.fixed is fixed assert param.multi is multi param = PDFParameter(initial=initial, limits=limits) assert", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) x = np.linspace(-1,", "multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.function", "import PDFParameter, PDF from spefit.common.stats import normal_pdf import numpy as np from numpy.testing", "is False assert param.multi is False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0,", "pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]]))", "0.1]), 1) with pytest.raises(TypeError): # 
noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) def", "0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "@pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100, 1000)", "pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf", "pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters =", "np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0,", "with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1)", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) x = np.linspace(-1, 6,", "[0, 0.1, 0.2], 1) def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "== initial assert param.limits == limits assert param.fixed is False assert param.multi is", "import assert_allclose import pytest def test_pdf_parameter(): initial = 1 limits = (0, 4)", "0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations", "limits = (0, 4) fixed = True multi = True param = PDFParameter(initial=initial,", "fixed = True multi = True param = 
PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True)", "4) fixed = True multi = True param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi)", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits ==", "len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"]", "sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results", "1), np.array([0, 0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "is False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass):", "fixed assert param.multi is multi param = PDFParameter(initial=initial, limits=limits) assert param.initial == initial", "pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 3", "pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2)", "2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, 
sigma1=0.5) assert", "assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0,", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0,", "pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2,", "= 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0,", "pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable limit = [2, 3] # noinspection", "False assert param.multi is False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial =", "1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError):", "PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True)", "sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5", "sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def", "noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5,", "np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), 
sigma=PDFParameter(initial=0.1, limits=(0,", "3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), )", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs", "== (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert", "PDF from spefit.common.stats import normal_pdf import numpy as np from numpy.testing import assert_allclose", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert", "np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1)", "assert len(lookup[0]) == 2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert", "assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial ==", "PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert param.limits == limits assert param.fixed is", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup =", "pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4", "param.limits == limits assert param.fixed is False assert param.multi is False def 
test_pdf_class():", "def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf", "False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True # noinspection", "== 2 assert len(lookup[0]) == 2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf, parameters) assert", "normal_pdf, parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict(", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert", "assert len(is_multi) == 2 assert len(lookup) == 1 assert len(lookup[0]) == 2 parameters", "y = pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\",", "param = PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert param.limits == limits assert", "pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4,", "len(lookup) == 1 assert len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2),", "PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert", "with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) def test_update_parameters_initial(): parameters", "limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), 
multi=True), ) pdf = PDF(2, normal_pdf, parameters)", "1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]]))", "assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys()))", "assert len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0,", "assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1,", "pdf(x, np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError):", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial", "assert param.limits == limits assert param.fixed is False assert param.multi is False def", "[1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"])", "np.array([[0, 1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0,", "pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode", "assert param.limits == limits assert param.fixed is fixed assert param.multi is multi param", "assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert 
pdf.parameters[\"sigma1\"].initial == 0.1 assert", ") pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2)", "np.array([[0, 1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True)", "assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters = dict(", "parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters) == 2", "== normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial ==", "True multi = True param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial ==", "2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf =", "np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call():", "def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y = pdf(x,", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits", "2 assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1", "normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1", "import 
normal_pdf import numpy as np from numpy.testing import assert_allclose import pytest def", "= PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert len(parameters) == 2 assert", "pdf.n_illuminations == 2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial", "assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable limit = [2, 3] #", "0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial", "DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), )", "assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "= PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True,", "2), multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results assert", "== 2 assert len(lookup) == 2 assert len(lookup[0]) == 2 def test_initial(): parameters", "results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert len(parameters) == 2", "pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0, 0.1,", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup =", "2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi,", "x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) 
np.testing.assert_allclose(np.trapz(y, x), 1,", "0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2,", "assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial ==", "pytest def test_pdf_parameter(): initial = 1 limits = (0, 4) fixed = True", "== 1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0,", "def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf", "pdf.n_free_parameters == 2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "pdf = PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 1", "parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3),", "assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters():", "np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) #", "dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "== 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters", "0.1 assert 
pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array =", "mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf,", "len(parameters) == 4 assert len(is_multi) == 2 assert len(lookup) == 2 assert len(lookup[0])", "assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "4 assert len(is_multi) == 2 assert len(lookup) == 2 assert len(lookup[0]) == 2", "x), 1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__", "== 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"]", "normal_pdf, parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x,", "normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert", "0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters =", "2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\",", "2)), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial", "assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "assert len(is_multi) == 2 assert len(lookup) == 2 assert len(lookup[0]) == 2 def", "0.2], 1) def test_update_parameters_initial(): 
parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)),", "== 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] ==", "normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) ==", "== 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "= PDF(2, normal_pdf, parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters", "np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass):", "np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution =", "parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1", "== normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial ==", "parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed", "assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__())", "def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__ == \"SiPMGentile\" with pytest.raises(ValueError): PDF.from_name(\"NULL\",", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, 
parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1,", "assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is True #", "pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial", "np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2,", "test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf =", "limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results", "1) parameters, is_multi, lookup = results assert len(parameters) == 2 assert len(is_multi) ==", "np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert", "assert len(lookup) == 1 assert len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results assert len(parameters) == 4 assert len(is_multi)", "== 2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial ==", "assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4,", "4) # Test mutable limit = [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert", "= dict( 
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf =", "== 4 assert len(is_multi) == 2 assert len(lookup) == 2 assert len(lookup[0]) ==", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial", "== 2 assert len(is_multi) == 2 assert len(lookup) == 1 assert len(lookup[0]) ==", "sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0,", "assert param.multi is multi param = PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert", "np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1)", "[0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters = dict(", "0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2,", "PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "[\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert 
pdf.parameters[\"mean\"].initial == 2", "len(lookup[0]) == 2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0,", "2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters, is_multi,", "pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0,", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\",", "pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2", "pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())),", "pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial ==", "normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1", "0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", 
"assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1),", "def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4", "parameters, is_multi, lookup = results assert len(parameters) == 4 assert len(is_multi) == 2", "2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed", "2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters =", "pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False", "(0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits", "= np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def", "2 assert len(lookup[0]) == 2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters ==", "== 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert", "lookup = results assert len(parameters) == 2 assert len(is_multi) == 2 assert len(lookup)", "(0, 4) fixed = True multi = True param 
= PDFParameter(initial=initial, limits=limits, fixed=fixed,", "is multi param = PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert param.limits ==", "pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0,", "== initial assert param.limits == limits assert param.fixed is fixed assert param.multi is", "2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1],", "assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is", "pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1", "with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf,", "iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert", "assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf =", "pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert 
pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial", "pdf = PDF(2, normal_pdf, parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1,", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert", "spefit.pdf.base import PDFParameter, PDF from spefit.common.stats import normal_pdf import numpy as np from", "assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable", "True param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial == initial assert param.limits", "= [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0]", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup", "== 2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "= pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1", "0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0,", "len(lookup) == 2 assert len(lookup[0]) == 2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0,", "pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict(", "= np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3)", "np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) 
np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution,", "assert param.fixed is False assert param.multi is False def test_pdf_class(): parameters = dict(", "assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial ==", "assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "2) parameters, is_multi, lookup = results assert len(parameters) == 4 assert len(is_multi) ==", "# noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x =", "mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert", "1 limits = (0, 4) fixed = True multi = True param =", "= PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0,", "assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert", "1]])) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations ==", "[0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert", "np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters =", "== 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert 
pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5)", "0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with", "assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial ==", "False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf", "dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2, normal_pdf, parameters)", "pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1", "= pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__())", "== 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert", "assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2,", "pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True", ") results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results assert len(parameters) ==", "assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf", "as np from numpy.testing import assert_allclose import pytest def test_pdf_parameter(): initial = 1", 
"test_pdf_parameter(): initial = 1 limits = (0, 4) fixed = True multi =", "def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100, 1000) y =", "4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test", "pdf(x, [0, 0.1, 0.2], 1) def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "disable_pedestal=True) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ =", "== 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0,", "sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial ==", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.function", "sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4) #", "results assert len(parameters) == 4 assert len(is_multi) == 2 assert len(lookup) == 2", "100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_)", "assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert", "= PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert", "1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList", "assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf", "pdf = PDF(2, normal_pdf, parameters) assert 
pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs():", "from spefit.common.stats import normal_pdf import numpy as np from numpy.testing import assert_allclose import", "with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0,", "pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf =", "pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0,", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1,", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters", "assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def", "pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 -", "limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert", "0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2),", "== normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 3 assert 
pdf.parameters[\"sigma0\"].initial ==", "parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) ==", "0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\",", "100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection", ") pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0),", "[\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters,", "multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup", "pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError):", "== 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert", "np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x,", "2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "\"sigma0\"]) assert 
np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "== (0, 4) # Test mutable limit = [2, 3] # noinspection PyTypeChecker", "from spefit.pdf.base import PDFParameter, PDF from spefit.common.stats import normal_pdf import numpy as np", "(-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits", ") pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters", "assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is", "== 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial,", "def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf", "PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_", "assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0,", "= True param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial == initial assert", "pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = 
pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0 assert", "np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]),", "PDFParameter, PDF from spefit.common.stats import normal_pdf import numpy as np from numpy.testing import", "== (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2)", "pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with", "param.limits == limits assert param.fixed is fixed assert param.multi is multi param =", "- pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__ == \"SiPMGentile\"", "sigma2=0.5) def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), )", "== 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError):", "2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is True", "def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf", "multi param = PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert param.limits == limits", "with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits():", "0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf, parameters) assert pdf.function ==", "= np.linspace(-5, 100, 
1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution", "assert pdf.function == normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters) == 2 assert", "1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y,", "assert pdf.n_illuminations == 1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert", "== 2 assert len(lookup) == 1 assert len(lookup[0]) == 2 parameters = dict(", "param.initial == initial assert param.limits == limits assert param.fixed is False assert param.multi", "True assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict(", "assert len(parameters) == 4 assert len(is_multi) == 2 assert len(lookup) == 2 assert", "= 1 limits = (0, 4) fixed = True multi = True param", "2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2) assert", "assert param.initial == initial assert param.limits == limits assert param.fixed is False assert", "2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "(2, 3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)),", "pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert", "assert param.initial == initial assert param.limits == limits assert param.fixed is fixed assert", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.function ==", "2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), 
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "== (2, 3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "param.initial == initial assert param.limits == limits assert param.fixed is fixed assert param.multi", "x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"]", "1 - pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__ ==", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.function ==", "tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def", ") results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert len(parameters) ==", "pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2", "0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2)", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial ==", "spefit.common.stats import normal_pdf import numpy as np from numpy.testing import assert_allclose import pytest", "PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4)", "test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results =", "param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial == initial assert param.limits ==", "# noinspection PyPep8Naming,PyArgumentList 
@pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5,", "pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial ==", "== 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2,", ") pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial ==", "sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False", "0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"]", ") pdf = PDF(2, normal_pdf, parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def", "pdf.n_illuminations == 2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup,", "normal_pdf import numpy as np from numpy.testing import assert_allclose import pytest def test_pdf_parameter():", "assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0,", "is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList", "noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100,", "6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1,", "assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert 
pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial ==", "pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]])) parameters = dict( mean=PDFParameter(initial=0,", "pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) def test_update_parameters_initial(): parameters =", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is", "== 2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "limits=(0, 2)), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert", "= PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert", "assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert", "1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "1]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "= PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0)", "2 def test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4,", "assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 3 assert", "pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with 
pytest.raises(IndexError): pdf(x,", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert", "limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters = dict(", "assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] ==", "len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]]))", "limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf assert", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup =", "== 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2,", "mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi,", "pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf,", "normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3,", "test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf =", "0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "3] # noinspection PyTypeChecker 
pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1", "np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf =", "== (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert", "Test mutable limit = [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) ==", "PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters)", "pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "== normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial ==", "PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits == (0, 2)", "pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters = dict(", "== (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert", "= PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters():", "pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with", "= PDF(2, normal_pdf, parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]),", "sigma1=0.5, sigma2=0.5) def 
test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)),", "(-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert", "0.1, 0.2], 1) def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "= True multi = True param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial", "multi=multi) assert param.initial == initial assert param.limits == limits assert param.fixed is fixed", "3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4)", "0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2))", "lookup = results assert len(parameters) == 4 assert len(is_multi) == 2 assert len(lookup)", "assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters = dict(", "pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "\"sigma1\"]) def test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits == (-3,", "1 assert len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1,", "normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial ==", "== 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf, 
parameters) assert pdf.function", "0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert", "pdf.n_illuminations == 2 assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial", "iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"]", "test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf =", "0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters =", "noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)),", "== limits assert param.fixed is fixed assert param.multi is multi param = PDFParameter(initial=initial,", "iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection", "2), multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert", "= results assert len(parameters) == 2 assert len(is_multi) == 2 assert len(lookup) ==", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameter_names", "2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial", "rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def 
test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x", "parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict(", "= PDFParameter(initial=initial, limits=limits) assert param.initial == initial assert param.limits == limits assert param.fixed", "== 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4", "assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters = dict(", "100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]),", "pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert", "parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 2", "# Test mutable limit = [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits)", "normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9", "== 2 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0,", "PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y =", "numpy as np from numpy.testing import assert_allclose import pytest def test_pdf_parameter(): initial =", "PDF(2, normal_pdf, 
parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0),", "pdf.parameters[\"mean\"].limits == (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable limit", "3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def", "1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "3) assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable limit = [2, 3]", "assert_allclose import pytest def test_pdf_parameter(): initial = 1 limits = (0, 4) fixed", "assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2)", "PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000)", "pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4)", "pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial ==", "== 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0,", "9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2", "assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 4 assert", "len(is_multi) == 2 assert len(lookup) == 2 assert len(lookup[0]) == 2 def test_initial():", "noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) 
def test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0,", "0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3)", "sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0,", "np.array_equal(pdf._lookup, np.array([[0, 1]])) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert", ") pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2) assert pdf.parameters[\"sigma\"].limits", "np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0,", "parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2,", "multi=True), ) results = PDF._prepare_parameters(parameters, 2) parameters, is_multi, lookup = results assert len(parameters)", "= results assert len(parameters) == 4 assert len(is_multi) == 2 assert len(lookup) ==", "== 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma0\"].initial == 0.4 assert", "assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf =", "normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters =", "== 2 assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert 
pdf.parameters[\"sigma1\"].initial ==", "= (0, 4) fixed = True multi = True param = PDFParameter(initial=initial, limits=limits,", "y = pdf(x, np.array(list(pdf.initial.values())), 0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x),", "== 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1],", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed", "parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1),", "pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4", "parameters) assert pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0,", "PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters)", "iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"]", "multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup", "parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2 def test_parameter_names(): parameters", "= PDF(2, normal_pdf, parameters) 
pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs)", "= np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x,", "== 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters", "limit = [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3)", "pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2,", "np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters =", "key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters():", "def test_call(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x,", "assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1,", "test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__ == \"SiPMGentile\" with pytest.raises(ValueError): PDF.from_name(\"NULL\", n_illuminations=1)", "0.1])) assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0, 
limits=(-2,", "True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x =", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf", "== dict(mean=0, sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "0) lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3)", "pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1", "(0, 4) # Test mutable limit = [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit)", "parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial", "= PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma\"].initial == 0.1 pdf.update_parameters_initial(mean=2,", "0.1, 0.2]), 0), pdf._function(x, 0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0,", "sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2,", "iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1)", "parameters, is_multi, lookup = results assert len(parameters) == 2 assert len(is_multi) == 2", "# 
noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) def test_update_parameters_initial(): parameters = dict(", "is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert", "is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True #", "pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations", "sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf", "1) with pytest.raises(TypeError): # noinspection PyTypeChecker pdf(x, [0, 0.1, 0.2], 1) def test_update_parameters_initial():", "PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi) assert param.initial == initial assert param.limits == limits assert", "np.array([0, 0.3])) def test_call(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True", "mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2, normal_pdf, parameters) assert", "0, 0.1)) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x,", "x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1))", "= pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - 
pedestal_contribution, rtol=1e-3) def test_from_name():", "\"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "(2, 3) limit[0] = 1 assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) def test_update_parameters_fixed(): parameters", "== 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]])) key_array = np.array(list(pdf.parameters.keys())) assert np.array_equal(key_array[pdf._lookup[0]],", "@pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_pdf_subclasses(PDFSubclass): pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y", "pdf = PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0)", "len(is_multi) == 2 assert len(lookup) == 1 assert len(lookup[0]) == 2 parameters =", "== 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),", "assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 2 assert", "(-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable limit = [2,", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial", "initial assert param.limits == limits assert param.fixed is fixed assert param.multi is multi", "len(pdf.parameters) == 4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup,", "4 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup, np.array([[0, 2],", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters ==", "= np.array(list(pdf.parameters.keys())) 
assert np.array_equal(key_array[pdf._lookup[0]], [\"mean0\", \"sigma0\"]) assert np.array_equal(key_array[pdf._lookup[1]], [\"mean1\", \"sigma1\"]) def test_lookup_parameters(): parameters", "assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1,", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3", "== 0.4 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results = PDF._prepare_parameters(parameters, 1)", "test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100, 1000) y = pdf(x,", "parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 4", "multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs =", "assert pdf.parameters[\"sigma0\"].initial == 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5)", "def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results", ") pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations ==", "0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection PyTypeChecker", "mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters)", "limits=(0, 2), multi=True), ) pdf = PDF(2, 
normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0", "x), 1, rtol=1e-3) # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1,", "PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial", "0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial", "# noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2, 3) limit[0] = 1 assert", "pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def test_update_parameters_limits(): parameters", "import numpy as np from numpy.testing import assert_allclose import pytest def test_pdf_parameter(): initial", "normal_pdf assert pdf.n_illuminations == 2 assert len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1", "test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "param.fixed is fixed assert param.multi is multi param = PDFParameter(initial=initial, limits=limits) assert param.initial", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) x =", "== [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "== limits assert param.fixed is False assert param.multi is False def test_pdf_class(): parameters", "test_call(): parameters = dict( mean=PDFParameter(initial=0, 
limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "def test_parameter_names(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), )", "pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters", "= dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2, normal_pdf,", "iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert iminuit_kwargs[\"limit_sigma0\"] == (0, 2) assert iminuit_kwargs[\"limit_sigma1\"] == (1,", "limits=(0, 2)), ) pdf = PDF(1, normal_pdf, parameters) assert pdf.parameters[\"mean\"].limits == (-2, 2)", "== 0 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert", "= pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] ==", "param.fixed is False assert param.multi is False def test_pdf_class(): parameters = dict( mean=PDFParameter(initial=0,", "dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2,", "= PDF(2, normal_pdf, parameters) assert pdf.n_free_parameters == 3 pdf.update_parameters_fixed(sigma1=True) assert pdf.n_free_parameters == 2", "= PDFSubclass(n_illuminations=1) x = np.linspace(-5, 100, 1000) y = pdf(x, np.array(list(pdf.initial.values())), 0) np.testing.assert_allclose(np.trapz(y,", "0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): # noinspection", "normal_pdf, parameters) assert pdf.parameters[\"mean\"].fixed is False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert", "multi=True), ) pdf = PDF(2, 
normal_pdf, parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x,", "= dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters, 1)", "np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1]), 1) with pytest.raises(TypeError): #", "== (-3, 3) assert pdf.parameters[\"sigma\"].limits == (0, 4) # Test mutable limit =", "True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1,", "== 0.1 pdf.update_parameters_initial(mean=2, sigma=0.4) assert pdf.parameters[\"mean\"].initial == 2 assert pdf.parameters[\"sigma\"].initial == 0.4 parameters", "pdf.n_illuminations == 1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial == 0.1 assert np.array_equal(pdf._lookup,", ") pdf = PDF(1, normal_pdf, parameters) assert pdf.function == normal_pdf assert pdf.n_illuminations ==", "assert iminuit_kwargs[\"limit_sigma1\"] == (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False", "limits=limits, fixed=fixed, multi=multi) assert param.initial == initial assert param.limits == limits assert param.fixed", "is True assert pdf.parameters[\"sigma\"].fixed is True # noinspection DuplicatedCode def test_prepare_multi_illumination_parameters(): parameters =", "PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def test_disable_pedestal(PDFSubclass): pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True) x = np.linspace(-5, 100,", "test_update_parameters_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf =", "== (1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"]", ") pdf = 
PDF(2, normal_pdf, parameters) x = np.linspace(-1, 6, 100) assert_allclose(pdf(x, np.array([0,", "assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"] == 0 assert iminuit_kwargs[\"sigma0\"] == 0.1 assert", "False assert pdf.parameters[\"sigma\"].fixed is False pdf.update_parameters_fixed(mean=True, sigma=True) assert pdf.parameters[\"mean\"].fixed is True assert pdf.parameters[\"sigma\"].fixed", "pedestal_contribution, rtol=1e-3) def test_from_name(): pdf = PDF.from_name(\"SiPMGentile\", n_illuminations=1) assert pdf.__class__.__name__ == \"SiPMGentile\" with", "sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2))", "limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameter_names == [\"mean\",", "multi=True), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert len(parameters)", "iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\", PDF.__subclasses__()) def", "2) assert pdf.parameters[\"sigma\"].limits == (0, 2) pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4)) assert pdf.parameters[\"mean\"].limits ==", "test_initial(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.parameters[\"mean\"].initial == 0", "2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert pdf.function == normal_pdf assert", "results assert len(parameters) == 2 assert len(is_multi) == 2 assert len(lookup) == 1", "parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) results", "2 assert 
len(is_multi) == 2 assert len(lookup) == 1 assert len(lookup[0]) == 2", "pdf.parameter_names == [\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)),", "mutable limit = [2, 3] # noinspection PyTypeChecker pdf.update_parameters_limits(mean=limit) assert tuple(pdf.parameters[\"mean\"].limits) == (2,", "initial = 1 limits = (0, 4) fixed = True multi = True", "2 assert len(lookup) == 1 assert len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0,", "pdf.function == normal_pdf assert pdf.n_illuminations == 1 assert len(pdf.parameters) == 2 assert pdf.parameters[\"sigma\"].initial", "limits=limits) assert param.initial == initial assert param.limits == limits assert param.fixed is False", "limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) x", "len(lookup[0]) == 2 parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "test_lookup_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf", "== 0.4 assert pdf.parameters[\"sigma1\"].initial == 0.4 pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5) assert pdf.parameters[\"mean\"].initial == 2", "2), multi=True), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) assert", "parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) results = PDF._prepare_parameters(parameters,", "[\"mean\", \"sigma0\", \"sigma1\"] def test_iminuit_kwargs(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0,", "iminuit_kwargs[\"sigma0\"] == 0.1 assert iminuit_kwargs[\"sigma1\"] == 0.2 assert iminuit_kwargs[\"limit_mean\"] == (-2, 2) assert", "len(parameters) == 2 assert 
len(is_multi) == 2 assert len(lookup) == 1 assert len(lookup[0])", "pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0,", "= PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.3) initial = np.array(list(pdf.initial.values())) assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1]))", "pdf.parameters[\"sigma1\"].initial == 0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5,", "PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) ==", "0, 0.2)) with pytest.raises(IndexError): pdf(x, np.array([0, 0.1, 0.2]), 2) with pytest.raises(IndexError): pdf(x, np.array([0,", "0.5 with pytest.raises(ValueError): pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5) with pytest.raises(ValueError): pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5) def", "(1, 2) assert iminuit_kwargs[\"fix_mean\"] is False assert iminuit_kwargs[\"fix_sigma0\"] is False assert iminuit_kwargs[\"fix_sigma1\"] is", "= dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2)), ) pdf = PDF(1, normal_pdf,", "sigma0=0.1, sigma1=0.2) def test_n_free_parameters(): parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2)), sigma=PDFParameter(initial=0.1, limits=(0, 2),", "pdf.update_parameters_initial(sigma1=0.2) pdf.update_parameters_limits(sigma1=(1, 2)) pdf.update_parameters_fixed(sigma1=True) iminuit_kwargs = pdf.iminuit_kwargs assert len(iminuit_kwargs) == 9 assert iminuit_kwargs[\"mean\"]", "False assert iminuit_kwargs[\"fix_sigma0\"] is 
False assert iminuit_kwargs[\"fix_sigma1\"] is True # noinspection PyPep8Naming,PyArgumentList @pytest.mark.parametrize(\"PDFSubclass\",", "lambda_ = pdf.initial[\"lambda_0\"] pedestal_contribution = np.exp(-lambda_) np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3) def", "assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]])) parameters = dict( mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),", "PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert len(parameters) == 2 assert len(is_multi)", "len(pdf.parameters) == 3 assert pdf.parameters[\"sigma0\"].initial == 0.1 assert pdf.parameters[\"sigma1\"].initial == 0.1 assert np.array_equal(pdf._lookup,", "2)), ) results = PDF._prepare_parameters(parameters, 1) parameters, is_multi, lookup = results assert len(parameters)", "2)), sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True), ) pdf = PDF(2, normal_pdf, parameters) pdf.update_parameters_initial(sigma1=0.2) assert" ]
[ "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "return elif not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\":", "self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist response", "governing permissions and limitations # under the License. import json from webob import", "language governing permissions and limitations # under the License. import json from webob", "Unless required by applicable law or agreed to in writing, software # distributed", "status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", )", "if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the", "See the # License for the specific language governing permissions and limitations #", "{\"error\": {\"status\": status, \"message\": \"You have been blocklisted\"}} super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type,", "super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body: content_type", "\"License\"); you may # not use this file except in compliance with the", "json from webob import Response class RateLimitExceededResponse(Response): \"\"\" defines the rate limit response", "import Response class RateLimitExceededResponse(Response): \"\"\" defines the rate limit response and defaults, which", "json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"Too Many 
Requests\"}}", "response json body \"\"\" if not status: status = '429 Too Many Requests'", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "the License. You may obtain # a copy of the License at #", "law or agreed to in writing, software # distributed under the License is", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "set_retry_after(self, retry_after): if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\"", "= [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist response and defaults,", "headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new BlocklistResponse with either a body", "'403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return", "\"\"\" defines the rate limit response and defaults, which can be overwritten via", "json body \"\"\" if not status: status = '403 Forbidden' if body: super(BlocklistResponse,", "content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"You have been blocklisted\"}}", "express or implied. See the # License for the specific language governing permissions", "json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"You have been", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "not use this file except in compliance with the License. 
You may obtain", ":param body: the response body :param json_body: the response json body \"\"\" if", "BlocklistResponse with either a body or json_body :param status: the status code :param", "Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif", "charset=\"UTF-8\" ) return elif not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\":", "or json_body :param status: the status code :param headers: list of header dictionaries", "\"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse", "body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse with either a body or json_body", "response json body \"\"\" if not status: status = '403 Forbidden' if body:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "with the License. You may obtain # a copy of the License at", "self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body: content_type =", "for the specific language governing permissions and limitations # under the License. 
import", "= {\"error\": {\"status\": status, \"message\": \"You have been blocklisted\"}} super(BlocklistResponse, self).__init__( status=status, headerlist=headers,", "SAP SE # # Licensed under the Apache License, Version 2.0 (the \"License\");", "retry_after): if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines", "Copyright 2018 SAP SE # # Licensed under the Apache License, Version 2.0", "__init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new BlocklistResponse with either", "Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after):", "not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "blocklist response and defaults, which can be overwritten via configuration. \"\"\" def __init__(self,", "dictionaries :param body: the response body :param json_body: the response json body \"\"\"", "License for the specific language governing permissions and limitations # under the License.", "the License. import json from webob import Response class RateLimitExceededResponse(Response): \"\"\" defines the", "can be overwritten via configuration. 
\"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None):", "a new BlocklistResponse with either a body or json_body :param status: the status", "2018 SAP SE # # Licensed under the Apache License, Version 2.0 (the", "[] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist response and defaults, which", "\"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def", "content_type=None, body=None, json_body=None): \"\"\" creates a new BlocklistResponse with either a body or", "rate limit response and defaults, which can be overwritten via configuration. \"\"\" def", "2.0 (the \"License\"); you may # not use this file except in compliance", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "and defaults, which can be overwritten via configuration. \"\"\" def __init__(self, status=None, headers=None,", "def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse with", "the response json body \"\"\" if not status: status = '429 Too Many", "self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not self.headerlist:", "'429 Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\"", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "from webob import Response class RateLimitExceededResponse(Response): \"\"\" defines the rate limit response and", "use this file except in compliance with the License. 
You may obtain #", "content_type=None, body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse with either a body or", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after)))", "= '403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" )", "elif not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"You", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "compliance with the License. You may obtain # a copy of the License", "response and defaults, which can be overwritten via configuration. \"\"\" def __init__(self, status=None,", "webob import Response class RateLimitExceededResponse(Response): \"\"\" defines the rate limit response and defaults,", "response body :param json_body: the response json body \"\"\" if not status: status", "status = '429 Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type,", "content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse,", "License, Version 2.0 (the \"License\"); you may # not use this file except", "= \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"You have been blocklisted\"}} super(BlocklistResponse,", "configuration. 
\"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "\"\"\" creates a new BlocklistResponse with either a body or json_body :param status:", "\"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self,", "= {\"error\": {\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type,", "the blocklist response and defaults, which can be overwritten via configuration. \"\"\" def", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "implied. See the # License for the specific language governing permissions and limitations", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "OF ANY KIND, either express or implied. See the # License for the", "headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse with either a body", "json_body: the response json body \"\"\" if not status: status = '429 Too", "# under the License. 
import json from webob import Response class RateLimitExceededResponse(Response): \"\"\"", ") return elif not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status,", "new BlocklistResponse with either a body or json_body :param status: the status code", "{\"error\": {\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body),", "body \"\"\" if not status: status = '403 Forbidden' if body: super(BlocklistResponse, self).__init__(", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "\"\"\" creates a new RateLimitExceededResponse with either a body or json_body :param status:", "defines the rate limit response and defaults, which can be overwritten via configuration.", "a body or json_body :param status: the status code :param headers: list of", "if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not", "the status code :param headers: list of header dictionaries :param body: the response", "Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if", "= '429 Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body,", "super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not", "status: status = '403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, 
body=body,", "json_body=None): \"\"\" creates a new BlocklistResponse with either a body or json_body :param", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "body \"\"\" if not status: status = '429 Too Many Requests' if body:", "{\"status\": status, \"message\": \"You have been blocklisted\"}} super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body),", "status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new BlocklistResponse with either a", "you may # not use this file except in compliance with the License.", "# Copyright 2018 SAP SE # # Licensed under the Apache License, Version", "RateLimitExceededResponse(Response): \"\"\" defines the rate limit response and defaults, which can be overwritten", "def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response):", "RateLimitExceededResponse with either a body or json_body :param status: the status code :param", ":param json_body: the response json body \"\"\" if not status: status = '429", "agreed to in writing, software # distributed under the License is distributed on", "status: the status code :param headers: list of header dictionaries :param body: the", "json_body=None): \"\"\" creates a new RateLimitExceededResponse with either a body or json_body :param", "if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not", "(the \"License\"); you may # not use this file except in compliance with", "status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist", "json_body = {\"error\": {\"status\": status, \"message\": \"You 
have been blocklisted\"}} super(BlocklistResponse, self).__init__( status=status,", "headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body: content_type = \"application/json\" json_body", "KIND, either express or implied. See the # License for the specific language", "may # not use this file except in compliance with the License. You", "body=None, json_body=None): \"\"\" creates a new BlocklistResponse with either a body or json_body", "either express or implied. See the # License for the specific language governing", "Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif", "{\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\",", "status = '403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\"", "__init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse with either", "# # Unless required by applicable law or agreed to in writing, software", "\"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"You have been blocklisted\"}} super(BlocklistResponse, self).__init__(", "a new RateLimitExceededResponse with either a body or json_body :param status: the status", "file except in compliance with the License. You may obtain # a copy", "\"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status,", "this file except in compliance with the License. You may obtain # a", "and limitations # under the License. 
import json from webob import Response class", "# Unless required by applicable law or agreed to in writing, software #", "json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After',", "list of header dictionaries :param body: the response body :param json_body: the response", "by applicable law or agreed to in writing, software # distributed under the", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist response and defaults, which can", "\"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new BlocklistResponse", "limit response and defaults, which can be overwritten via configuration. \"\"\" def __init__(self,", "the rate limit response and defaults, which can be overwritten via configuration. \"\"\"", "permissions and limitations # under the License. import json from webob import Response", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "or implied. See the # License for the specific language governing permissions and", "either a body or json_body :param status: the status code :param headers: list", "overwritten via configuration. 
\"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates", "with either a body or json_body :param status: the status code :param headers:", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "status: status = '429 Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers,", "class BlocklistResponse(Response): \"\"\" defines the blocklist response and defaults, which can be overwritten", "super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body: content_type", "the response body :param json_body: the response json body \"\"\" if not status:", "the specific language governing permissions and limitations # under the License. import json", "Response class RateLimitExceededResponse(Response): \"\"\" defines the rate limit response and defaults, which can", "status code :param headers: list of header dictionaries :param body: the response body", "status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body: content_type = \"application/json\"", "body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body:", "License. You may obtain # a copy of the License at # #", "License. 
import json from webob import Response class RateLimitExceededResponse(Response): \"\"\" defines the rate", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "body :param json_body: the response json body \"\"\" if not status: status =", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "creates a new BlocklistResponse with either a body or json_body :param status: the", ":param headers: list of header dictionaries :param body: the response body :param json_body:", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"Too Many", "def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new BlocklistResponse with", "which can be overwritten via configuration. \"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None,", "code :param headers: list of header dictionaries :param body: the response body :param", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist response and defaults, which can be", "limitations # under the License. import json from webob import Response class RateLimitExceededResponse(Response):", "\"message\": \"You have been blocklisted\"}} super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\" )", "ANY KIND, either express or implied. See the # License for the specific", "the # License for the specific language governing permissions and limitations # under", "except in compliance with the License. 
You may obtain # a copy of", ":param json_body: the response json body \"\"\" if not status: status = '403", "via configuration. \"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a", "json_body :param status: the status code :param headers: list of header dictionaries :param", "import json from webob import Response class RateLimitExceededResponse(Response): \"\"\" defines the rate limit", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "headers: list of header dictionaries :param body: the response body :param json_body: the", "json_body: the response json body \"\"\" if not status: status = '403 Forbidden'", "status, \"message\": \"You have been blocklisted\"}} super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\"", "json body \"\"\" if not status: status = '429 Too Many Requests' if", "to in writing, software # distributed under the License is distributed on an", "defines the blocklist response and defaults, which can be overwritten via configuration. 
\"\"\"", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "if not status: status = '403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers,", "content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist = []", "body or json_body :param status: the status code :param headers: list of header", "required by applicable law or agreed to in writing, software # distributed under", "elif not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"Too", "= \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__(", "applicable law or agreed to in writing, software # distributed under the License", "be overwritten via configuration. \"\"\" def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\"", "Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" )", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License", "new RateLimitExceededResponse with either a body or json_body :param status: the status code", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"\"\" defines the blocklist response and defaults, which can be overwritten via configuration.", "status=None, headers=None, content_type=None, body=None, json_body=None): \"\"\" creates a new RateLimitExceededResponse with either a", ":param status: the status code :param headers: list of header dictionaries :param body:", "not status: status = '403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status, headerlist=headers, content_type=content_type,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "SE # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "in compliance with the License. You may obtain # a copy of the", "# not use this file except in compliance with the License. You may", "body: the response body :param json_body: the response json body \"\"\" if not", "Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return", "specific language governing permissions and limitations # under the License. import json from", "or agreed to in writing, software # distributed under the License is distributed", "class RateLimitExceededResponse(Response): \"\"\" defines the rate limit response and defaults, which can be", "if not status: status = '429 Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__(", "\"\"\" if not status: status = '403 Forbidden' if body: super(BlocklistResponse, self).__init__( status=status,", "# License for the specific language governing permissions and limitations # under the", "defaults, which can be overwritten via configuration. 
\"\"\" def __init__(self, status=None, headers=None, content_type=None,", "headerlist=headers, content_type=content_type, json_body=json.dumps(json_body), charset=\"UTF-8\", ) def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist =", "body=body, charset=\"UTF-8\" ) return elif not json_body: content_type = \"application/json\" json_body = {\"error\":", "the response json body \"\"\" if not status: status = '403 Forbidden' if", "\"\"\" if not status: status = '429 Too Many Requests' if body: super(RateLimitExceededResponse,", "json_body = {\"error\": {\"status\": status, \"message\": \"Too Many Requests\"}} super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers,", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body: content_type = \"application/json\" json_body =", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "not status: status = '429 Too Many Requests' if body: super(RateLimitExceededResponse, self).__init__( status=status,", "body: super(RateLimitExceededResponse, self).__init__( status=status, headerlist=headers, content_type=content_type, body=body, charset=\"UTF-8\" ) return elif not json_body:", "BlocklistResponse(Response): \"\"\" defines the blocklist response and defaults, which can be overwritten via", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", ") def set_retry_after(self, retry_after): if not self.headerlist: self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class", "not json_body: content_type = \"application/json\" json_body = {\"error\": {\"status\": status, \"message\": \"You have", "in writing, software # distributed under the License is distributed on an \"AS", "under the License. 
import json from webob import Response class RateLimitExceededResponse(Response): \"\"\" defines", "Version 2.0 (the \"License\"); you may # not use this file except in", "header dictionaries :param body: the response body :param json_body: the response json body", "creates a new RateLimitExceededResponse with either a body or json_body :param status: the", "of header dictionaries :param body: the response body :param json_body: the response json", "self.headerlist = [] self.headerlist.append(('Retry-After', str(retry_after))) class BlocklistResponse(Response): \"\"\" defines the blocklist response and" ]
[ "volume stop|stfu|skip|next - Remove the top video pop|undo|oops - Remove the bottom video", "for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\":", "yi in video_json['items']: # sr = { # \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]),", "# \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output", "youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__()", "[] if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr)", "in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message)", "queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def", "yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s'])", "\"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results", "\"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the args dict for the first", "(socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise 
service.Return(result)", "u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\"", "else: human_str = \"{1}:{2:02d}\".format(h, m, s) return h * 360 + m *", "raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the args dict", "{0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\")", "(\"quote\", \"Display a quote from the fortune database\"), (\"image\", \"`image <url>`: Display an", "# \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data)", "(.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame): print print \"Received", "\"title\": sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message)", "\"Remove the last item on the queue\"), (\"bump\", \"Move the last item on", "account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise", "360 + m * 60 + s, human_str @service.coroutine def youtube_search(self,q): if q", "youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\":", "raise Exception(\"Error communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def", "cmd_say(self,q,text): yield 
self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear words according", "\"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results =", "# Swear words according to yahoo chat. # See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\")", "m, s) return h * 360 + m * 60 + s, human_str", "service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the args dict for", "fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if", "video\"\"\") commands={ 'do': do, 'suggest': suggest, } suggest_commands = [ (\"vol up\", \"Raise", "self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() #", "service.Return(results) @service.coroutine def wildcard_suggest(self, text): text = text.strip() results = [] if text.startswith(\"http:\"):", "suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs = yield", "service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\")", "raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise", "= subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type': 'text', 'args': { 'text': fortune_text,", "raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if", "# sr = { # \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"],", "vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100:", "'html.parser') video_ids = [] output = [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid)", "json import os import random import re import signal import socket import subprocess", "\"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def", "\"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from YouTube\", \"match\": 0,", "text, 'body' : text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0],", "the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a quote from the fortune", "result=yield self.youtube_search(kw) if not result or not result[0]: raise Exception('No Youtube results found.')", "urllib2 import shmooze.lib.packet as packet import shmooze.lib.service as service import shmooze.settings as settings", "\"no\", \"duration\": (1,1)} output.append(sr) # output.append(sr) #youtube_video_url = 
\"https://www.googleapis.com/youtube/v3/videos\" #video_data = { #", "url = result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def", "t=mod['type'] if t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return", "signal import socket import subprocess import tornado.httpclient import traceback import urllib import urllib2", "via NLP service.\" bug_data = json.dumps({'title': text, 'body' : text + suffix}) password_mgr", "communicating with volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message):", "None else 0 for m in matches] if h > 0: human_str =", "BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"]", "settings import urllib2 from bs4 import BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor,", "\"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q, }", "= response.read() soup = BeautifulSoup(html, 'html.parser') video_ids = [] output = [] for", "a quote and display it on the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\",", "fortune database\"), (\"image\", \"`image <url>`: Display an image on the screen as a", "raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is", "self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod)", "is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params})", "hms_str h, m, s = [int(m.strip(\"HMS\")) if m is not None else 0", "result or not result[0]: raise Exception('No Youtube results found.') url = result[0][\"url\"] title", "= \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\",", "#if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text'])", "# \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr)", "youtube_search(self,q): if q in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\"", "yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def youtube_suggest(self,", "cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params})", "shutdown_handler(signum,frame): print print \"Received signal, attempting graceful shutdown...\" service.ioloop.add_callback_from_signal(nlp.shutdown) signal.signal(signal.SIGTERM, shutdown_handler) signal.signal(signal.SIGINT, 
shutdown_handler)", "return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted", "- [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from YouTube\", \"match\": 0, }) raise", "(r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug", "if len(queue)==1: raise Exception(\"Only one thing on the queue!\") old_uids=[mod['uid'] for mod in", "service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if", "from YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results", "print hms_str return 0, hms_str h, m, s = [int(m.strip(\"HMS\")) if m is", "hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the args", "fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type': 'text', 'args': { 'text':", "= yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if", "# output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\", # \"key\":", "except AttributeError: raise service.Return(u\"No github account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO", "empty.\") result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def", "service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result", "mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0:", "#result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol):", "self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0:", "= [] for v in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'],", "pop|undo|oops - Remove the bottom video bump - Move the bottom video to", "Exception(\"Only one thing on the queue!\") old_uids=[mod['uid'] for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1]", "NLP service.\" bug_data = json.dumps({'title': text, 'body' : text + suffix}) password_mgr =", "(r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current),", "[] output = [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = {", "args dict for the first youtube result for 'match' youtube_search_url = 
\"https://www.googleapis.com/youtube/v3/search\" search_data", "import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']}", "[{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from YouTube\", \"match\": 0, }) raise service.Return(results)", "output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key,", "{ 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph',", "= random.sample(words, 5) text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine", "packet import shmooze.lib.service as service import shmooze.settings as settings import urllib2 from bs4", "yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up':", "soup = BeautifulSoup(html, 'html.parser') video_ids = [] output = [] for vid in", "cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}})", "service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune']", "except: print hms_str return 0, hms_str h, m, s = [int(m.strip(\"HMS\")) if m", 
"shmooze.lib.packet as packet import shmooze.lib.service as service import shmooze.settings as settings import urllib2", "the screen as a background\"), (\"video\", \"`video <url>`: Play a video\"), ] #TODO:", "0 for m in matches] if h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m,", "from the fortune database\"), (\"image\", \"`image <url>`: Display an image on the screen", "[] for sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc,", "self.youtube_search(kw) if not result or not result[0]: raise Exception('No Youtube results found.') url", "pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache =", "{ 'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google',", "= { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\":", "super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO 8601 duration strings: PT#M#S hms_str", "['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache = {} super(NLP,", "import tornado.httpclient import traceback import urllib import urllib2 import shmooze.lib.packet as packet import", "= settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type': 'text',", "\"Swear a bunch\"), (\"quote\", \"Display a quote from the fortune database\"), (\"image\", \"`image", "return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']:", "re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", 
hms_str).groups() except: print hms_str return 0, hms_str h, m, s = [int(m.strip(\"HMS\"))", "@service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise", "bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title': text,", "def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]})", "Remove the bottom video bump - Move the bottom video to the top", "= result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url):", "service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\")", "in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug:", "raise service.Return(\"\"\"Commands I understand: help|? - This vol - Get volume vol [num]", "thing on the queue!\") old_uids=[mod['uid'] for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]})", "password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError: raise service.Return(u\"No github", "cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? 
- This vol - Get volume vol", "video to the top q|queue - List the queue cur|current - Give the", "+ video_form_data) #video_json = json.loads(video_results.body) #output = [] #for yi in video_json['items']: #", "'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and", "- thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? - This vol", "m * 60 + s, human_str @service.coroutine def youtube_search(self,q): if q in self.youtube_cache:", "0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h, m, s) return", "\"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title': text, 'body' :", "on the queue\"), (\"bump\", \"Move the last item on the queue to top", "@service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating", "<quote>`: Say a quote and display it on the screen\"), (\"fuck\", \"Swear a", "\"help\": sc_help, \"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine", "try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\")", "@service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1] yield", "5, } } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") def pretty(self,mod): 
t=mod['type'] if", "if m: result = yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\") #result", "the current item playing bug - Submit a bug report Anything else -", "text = text.strip() results = [] if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs)", "response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, 'html.parser') video_ids = []", "wildcard_suggest(self, text): text = text.strip() results = [] if text.startswith(\"http:\"): rs = yield", "query = q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url)", "Youtube results found.') url = result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued", "soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\":", "video bump - Move the bottom video to the top q|queue - List", "= \"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html,", "shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield", "on the screen as a background\"), (\"video\", \"`video <url>`: Play a video\"), ]", "image on the screen as a background\"), (\"video\", \"`video <url>`: Play a video\"),", "result = yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\") #result = yield", "@service.coroutine def youtube_suggest(self, q): videos = yield self.youtube_search(q) results = [] for v", "request(bug_url, bug_data, 
auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s - thanks!') @service.coroutine def cmd_help(self,q):", "= NLP() def shutdown_handler(signum,frame): print print \"Received signal, attempting graceful shutdown...\" service.ioloop.add_callback_from_signal(nlp.shutdown) signal.signal(signal.SIGTERM,", "= \"{1}:{2:02d}\".format(h, m, s) return h * 360 + m * 60 +", "queue cur|current - Give the current item playing bug - Submit a bug", "if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if", "Return the args dict for the first youtube result for 'match' youtube_search_url =", "import socket import subprocess import tornado.httpclient import traceback import urllib import urllib2 import", "results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions = [] for", "yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw)", "cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise", "self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO 8601 duration", "m is not None else 0 for m in matches] if h >", "#for yi in video_json['items']: # sr = { # \"video_id\": yi[\"id\"], # \"url\":", "raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is", "@service.coroutine def cmd_rm_top(self,q): queue=yield 
self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield", "raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results = [{ \"title\": url, \"action\":", "self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url +", "def youtube_suggest(self, q): videos = yield self.youtube_search(q) results = [] for v in", "if len(queue)==0: raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. {1}\".format(n+1,self.pretty(mod)) for (n,mod) in", "self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear words according to yahoo", "service.Return(\"\"\"Commands I understand: help|? - This vol - Get volume vol [num] -", "= yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def", "This vol - Get volume vol [num] - Set volume vol up|down -", "results = [] for v in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\":", "\"Move the last item on the queue to top of the queue and", "vol=int(vol) if vol>100: raise Exception(\"Volume cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise", "(r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue),", "if vol>100: raise Exception(\"Volume cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume", "old_uids=[mod['uid'] for mod in queue] 
mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to", "rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result =", "import BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost'", "#youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, #", "def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with", "= output raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos = yield self.youtube_search(q) results", "self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message", "raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\")", "\",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\" + video_form_data)", "yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise", "(\"bump\", \"Move the last item on the queue to top of 
the queue", "import os import random import re import signal import socket import subprocess import", "is empty.\") result = '\\n'.join([u\"{0}. {1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine", "def wildcard_suggest(self, text): text = text.strip() results = [] if text.startswith(\"http:\"): rs =", "yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\") if assert_success: raise", "control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func)", "enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result", "'do': do, 'suggest': suggest, } suggest_commands = [ (\"vol up\", \"Raise the volume\"),", "a video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top),", "vol [num] - Set volume vol up|down - Change volume stop|stfu|skip|next - Remove", "data) raise service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in mod['parameters']:", "yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result))", "raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise 
Exception(\"Queue is", "(r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue),", "# Return the args dict for the first youtube result for 'match' youtube_search_url", "results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message =", "def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else:", "def parse_duration(dstr): # Parse ISO 8601 duration strings: PT#M#S hms_str = dstr.strip() try:", "Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield", "service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100)", "- Remove the bottom video bump - Move the bottom video to the", "found.') url = result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine", "self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is", "raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if 
m:", "return 0, hms_str h, m, s = [int(m.strip(\"HMS\")) if m is not None", "urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text':", "'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache = {}", "q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url) html =", "suggest(self,message): stripped_message = message.strip() suggestions = [] for sc, sc_help in self.suggest_commands: if", "suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError):", "<gh_stars>0 import json import os import random import re import signal import socket", "than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield", "def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}})", "and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text'", "\"`video <url>`: Play a video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol", "}) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) 
@service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result", "cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data =", "(n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise", "random import re import signal import socket import subprocess import tornado.httpclient import traceback", "is currently playing\"), (\"pop\", \"Remove the last item on the queue\"), (\"bump\", \"Move", "video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top),", "yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"],", "text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text):", "service.Return(u'Submitted bug: %s - thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|?", "= text.strip() results = [] if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr", "Change volume stop|stfu|skip|next - Remove the top video pop|undo|oops - Remove the bottom", "cur|current - Give the current item playing bug - Submit a bug report", "current item playing bug - Submit a bug report Anything else - Queue", "suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], 
settings.github_login[1]) except AttributeError: raise service.Return(u\"No", "I understand: help|? - This vol - Get volume vol [num] - Set", "60 + s, human_str @service.coroutine def youtube_search(self,q): if q in self.youtube_cache: print \"cache", "\"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True):", "len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def", "len(queue)==1: raise Exception(\"Only one thing on the queue!\") old_uids=[mod['uid'] for mod in queue]", "\"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q, } query = q.replace(\"", "+ \"?\" + video_form_data) #video_json = json.loads(video_results.body) #output = [] #for yi in", "shmooze.lib.service as service import shmooze.settings as settings import urllib2 from bs4 import BeautifulSoup", "youtube_suggest(self, q): videos = yield self.youtube_search(q) results = [] for v in videos:", "@service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield", "= yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield", "\"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q,", "self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q, } query", "\"Raise the volume\"), (\"vol down\", \"Lower the volume\"), 
(\"skip\", \"Remove the current item", "\"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) # output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data =", "print(vid) sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\":", "def cmd_swear(self,q): # Swear words according to yahoo chat. # See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words", "item on the queue\"), (\"bump\", \"Move the last item on the queue to", "self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot be greater than 100\")", "NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key", "'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\", data)", "cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise", "play it\"), (\"say\", \"`say <quote>`: Say a quote and display it on the", "strings: PT#M#S hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str", "= 
\"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise", "(r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw),", "Set volume vol up|down - Change volume stop|stfu|skip|next - Remove the top video", "= message.strip() suggestions = [] for sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({", "s = [int(m.strip(\"HMS\")) if m is not None else 0 for m in", "#video_json = json.loads(video_results.body) #output = [] #for yi in video_json['items']: # sr =", "{0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") if", "volume vol up|down - Change volume stop|stfu|skip|next - Remove the top video pop|undo|oops", "@service.coroutine def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear", "\"match\": 0, }) raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results = [{", "set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\")))", 
"['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type': 'text', 'args': {", "urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json = json.loads(video_results.body) #output", "def __init__(self): print \"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr):", "rs = yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine", "(r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube", "'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\", data) raise", "def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down':", "yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] =", "sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs =", "to top of the queue and play it\"), (\"say\", \"`say <quote>`: Say a", "dict for the first youtube result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data =", "queue that is currently playing\"), (\"pop\", \"Remove the last item on the queue\"),", "background\"), (\"video\", \"`video <url>`: Play a video\"), ] #TODO: splash 
nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help),", "query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, 'html.parser') video_ids =", "yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise", "= {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO 8601 duration strings:", "def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear words", "is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params})", "Parse ISO 8601 duration strings: PT#M#S hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\",", "@service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args)", "except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result)", "in video_json['items']: # sr = { # \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), #", "service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear words according to yahoo chat. 
#", "fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type':", "@service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? - This vol - Get", "text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results)", "self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue", "bs4 import BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"]", "= settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod", "print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return", "print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the args dict for the", "tornado.httpclient.AsyncHTTPClient() # Return the args dict for the first youtube result for 'match'", "assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands:", "fortune_args) data = { 'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor':", "yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text = text.strip() results = [] if", "{0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield 
self.youtube_search(kw) if not result or", "self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if", "volume\"), (\"skip\", \"Remove the current item on the queue that is currently playing\"),", "on the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a quote from the", "else 0 for m in matches] if h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h,", "if t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description'])", "a bunch\"), (\"quote\", \"Display a quote from the fortune database\"), (\"image\", \"`image <url>`:", "understand: help|? - This vol - Get volume vol [num] - Set volume", "(\"vol up\", \"Raise the volume\"), (\"vol down\", \"Lower the volume\"), (\"skip\", \"Remove the", "# \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], #", "top of the queue and play it\"), (\"say\", \"`say <quote>`: Say a quote", "started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO 8601", "s) return h * 360 + m * 60 + s, human_str @service.coroutine", "class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key =", "(1,1)} output.append(sr) # output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\",", "\"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # 
\"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q]", "= tornado.httpclient.AsyncHTTPClient() # Return the args dict for the first youtube result for", "@service.coroutine def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result =", "for sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\":", "(r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune),", "# \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], #", "fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5,", "\"match\": len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text = text.strip() results", "(\"skip\", \"Remove the current item on the queue that is currently playing\"), (\"pop\",", "http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = 
\"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text = \" \".join(selection) yield", "raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions = [] for sc,", "vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) # output.append(sr) #youtube_video_url", "settings.github_login[0], settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account configured in settings.json\") handler =", "Move the bottom video to the top q|queue - List the queue cur|current", "m, s = [int(m.strip(\"HMS\")) if m is not None else 0 for m", "raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q):", "q, } query = q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query response", "vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot be greater", "Display an image on the screen as a background\"), (\"video\", \"`video <url>`: Play", "first youtube result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\",", "\"duration\": (1,1)} output.append(sr) # output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\":", "self.wildcard_suggest(message) suggestions.extend(rs) raise 
service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except", "result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if", "(r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame): print print \"Received signal, attempting graceful", "}) raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results = [{ \"title\": url,", "service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error", "PT#M#S hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return", "in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine", "volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for", "m, s) else: human_str = \"{1}:{2:02d}\".format(h, m, s) return h * 360 +", "human_str = \"{1}:{2:02d}\".format(h, m, s) return h * 360 + m * 60", "output.append(sr) # output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\", #", "nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), 
(r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot),", "raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text =", "in matches] if h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str", "and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url =", "#TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s - thanks!') @service.coroutine def", "\"`image <url>`: Display an image on the screen as a background\"), (\"video\", \"`video", "sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help,", "\"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids),", "[] #for yi in video_json['items']: # sr = { # \"video_id\": yi[\"id\"], #", "= yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if assert_success: raise", "(\"video\", \"`video <url>`: Play a video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help),", "+ m * 60 + s, human_str @service.coroutine def youtube_search(self,q): if q in", "= result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}})", "traceback import urllib import urllib2 
import shmooze.lib.packet as packet import shmooze.lib.service as service", "= [] #for yi in video_json['items']: # sr = { # \"video_id\": yi[\"id\"],", "handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s -", "= \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args", "\" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args =", "{ \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5,", "+ s, human_str @service.coroutine def youtube_search(self,q): if q in self.youtube_cache: print \"cache hit\"", "volume vol [num] - Set volume vol up|down - Change volume stop|stfu|skip|next -", "\"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]),", "\"views\": \"no\", \"duration\": (1,1)} output.append(sr) # output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data = {", "\"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) # output.append(sr)", "#TODO results = [{ \"title\": url, \"action\": url, \"help\": \"\", \"match\": len(url) }]", "def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result)", "} 
query = q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query response =", "if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield", "in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix", "video_form_data) #video_json = json.loads(video_results.body) #output = [] #for yi in video_json['items']: # sr", "recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def", "vol up|down - Change volume stop|stfu|skip|next - Remove the top video pop|undo|oops -", "the volume\"), (\"vol down\", \"Lower the volume\"), (\"skip\", \"Remove the current item on", "import traceback import urllib import urllib2 import shmooze.lib.packet as packet import shmooze.lib.service as", "\"help\": \"Play video from YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine def url_suggest(self,", "result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\") if", "thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? 
- This vol -", "service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions = [] for sc, sc_help", "def suggest(self,message): stripped_message = message.strip() suggestions = [] for sc, sc_help in self.suggest_commands:", "hms_str return 0, hms_str h, m, s = [int(m.strip(\"HMS\")) if m is not", "words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}})", "(r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image),", "url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.')", "message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups()) raise", "a background\"), (\"video\", \"`video <url>`: Play a video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help),", "[{ \"title\": url, \"action\": url, \"help\": \"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine", "= \"https://www.googleapis.com/youtube/v3/videos\" #video_data = { # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\":", "5) text = \" \".join(selection) yield 
self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self,", "= [] output = [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr =", "return h * 360 + m * 60 + s, human_str @service.coroutine def", "bump - Move the bottom video to the top q|queue - List the", "# \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\":", "u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']: return", "service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error", "import shmooze.lib.packet as packet import shmooze.lib.service as service import shmooze.settings as settings import", "text): text = text.strip() results = [] if text.startswith(\"http:\"): rs = yield self.url_suggest(text)", "Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield", "raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not", "down\", \"Lower the volume\"), (\"skip\", \"Remove the current item on the queue that", "splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), 
(r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot),", "q): videos = yield self.youtube_search(q) results = [] for v in videos: results.append({", "{} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO 8601 duration strings: PT#M#S", "(r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame): print print \"Received signal, attempting", "in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text'", "yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message):", "mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix =", "m: result = yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\") #result =", "return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine", "human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h, m, s) return h", "queue and play it\"), (\"say\", \"`say <quote>`: Say a quote and display it", "sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs)", "}] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text = text.strip() results = []", "'\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params})", "the queue!\") old_uids=[mod['uid'] for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped", "yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions =", "try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account configured in", "is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is", "service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result)", "text.') @service.coroutine def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): #", "func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did", "raise Exception(\"Volume cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to", "empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if", "raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine", "a quote from the fortune database\"), (\"image\", \"`image <url>`: Display an image on", "service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise", "results = [] if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr = yield", "\"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q, } query = q.replace(\" \",", "sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\",", "\"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) # 
output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\" #video_data", "- This vol - Get volume vol [num] - Set volume vol up|down", "words according to yahoo chat. # See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection =", "\"`say <quote>`: Say a quote and display it on the screen\"), (\"fuck\", \"Swear", "(r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot),", "#video_data = { # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #}", "raise Exception(\"Error communicating with volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine", "vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot be greater than 100\") yield", "raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I)", "yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args})", "if len(queue)==0: 
raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine", "result[0]: raise Exception('No Youtube results found.') url = result[0][\"url\"] title = result[0][\"title\"] yield", "try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if", "#} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos =", "the last item on the queue\"), (\"bump\", \"Move the last item on the", "= { # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data", "for the first youtube result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = {", "\"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\") result", "\".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\",", "chat. 
# See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text =", "= { 'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech':", "for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]),", "def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield", "- Get volume vol [num] - Set volume vol up|down - Change volume", "Queue Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest, } suggest_commands = [ (\"vol", "print print \"Received signal, attempting graceful shutdown...\" service.ioloop.add_callback_from_signal(nlp.shutdown) signal.signal(signal.SIGTERM, shutdown_handler) signal.signal(signal.SIGINT, shutdown_handler) service.ioloop.start()", "urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key", "'suggest': suggest, } suggest_commands = [ (\"vol up\", \"Raise the volume\"), (\"vol down\",", "if t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def 
cmd_bug(self,q,text):", "result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise", "'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer':", "+ suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError: raise", "h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h, m,", "yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\") raise", "vol>100: raise Exception(\"Volume cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set", "\"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) # output.append(sr) #youtube_video_url = \"https://www.googleapis.com/youtube/v3/videos\"", "as a background\"), (\"video\", \"`video <url>`: Play a video\"), ] #TODO: splash nlp_commands=[", "raise service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in mod['parameters']: return", "(r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image),", "{0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine 
def", "tornado.httpclient import traceback import urllib import urllib2 import shmooze.lib.packet as packet import shmooze.lib.service", "(.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp =", "subprocess import tornado.httpclient import traceback import urllib import urllib2 import shmooze.lib.packet as packet", "= [ (\"vol up\", \"Raise the volume\"), (\"vol down\", \"Lower the volume\"), (\"skip\",", "= \"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title': text, 'body' : text +", "\"q\": q, } query = q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query", "yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise", "def url_suggest(self, url): #TODO results = [{ \"title\": url, \"action\": url, \"help\": \"\",", "help|? 
- This vol - Get volume vol [num] - Set volume vol", "s, human_str @service.coroutine def youtube_search(self,q): if q in self.youtube_cache: print \"cache hit\" raise", "\"Lower the volume\"), (\"skip\", \"Remove the current item on the queue that is", "\"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\":", "self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos = yield self.youtube_search(q)", "results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from YouTube\", \"match\":", "the current item on the queue that is currently playing\"), (\"pop\", \"Remove the", "configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted", "on the queue that is currently playing\"), (\"pop\", \"Remove the last item on", "self.youtube_search(q) results = [] for v in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v),", "port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self):", "hms_str).groups() except: print hms_str return 0, hms_str h, m, s = [int(m.strip(\"HMS\")) if", "u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def", "if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text) results.extend(yr) raise", "service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results = 
[{ \"title\": url, \"action\": url,", "to yahoo chat. # See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5)", "return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP", "json.dumps({'title': text, 'body' : text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url,", "parse_duration(dstr): # Parse ISO 8601 duration strings: PT#M#S hms_str = dstr.strip() try: matches", "try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return 0, hms_str h, m,", "\"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\":", "\"action\": v['url'], \"help\": \"Play video from YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine", "\"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url):", "service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0]", "raise service.Return(u'Queued text.') @service.coroutine def 
cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def", "> 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h, m, s)", "text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q):", "raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\")", "(r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame): print print \"Received signal,", "screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a quote from the fortune database\"),", "if len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only one thing on", "import urllib2 import shmooze.lib.packet as packet import shmooze.lib.service as service import shmooze.settings as", "__init__(self): print \"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): #", "suffix = \"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title': text, 'body' : text", "(r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp", "@service.coroutine def cmd_swear(self,q): # Swear words according to yahoo chat. 
# See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html", "= yield self.youtube_search(q) results = [] for v in videos: results.append({ \"title\": u\"{0[title]}", "print \"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse", "def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}.", "random.sample(words, 5) text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def", "queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only one", "[num] - Set volume vol up|down - Change volume stop|stfu|skip|next - Remove the", "(r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image", "Exception(\"Error communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True):", "new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield", "@service.coroutine def url_suggest(self, url): #TODO results = [{ \"title\": url, \"action\": url, \"help\":", "\"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q, } query = q.replace(\" \", \"+\")", "\"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) # 
output.append(sr) #youtube_video_url =", "0, }) raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results = [{ \"title\":", "bug - Submit a bug report Anything else - Queue Youtube video\"\"\") commands={", "#output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos = yield", "= dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return 0, hms_str", "\"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the", "videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from YouTube\",", "video_json['items']: # sr = { # \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\":", "'duration': 5, } } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type']", "the top video pop|undo|oops - Remove the bottom video bump - Move the", "item on the queue that is currently playing\"), (\"pop\", \"Remove the last item", "communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try:", "currently playing\"), (\"pop\", \"Remove the last item on the queue\"), (\"bump\", \"Move the", "\"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h, m, s) return h * 360", "display it on the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a quote", "the fortune database\"), (\"image\", \"`image <url>`: Display an image on the screen as", "service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield 
self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\")", "= self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue", "video pop|undo|oops - Remove the bottom video bump - Move the bottom video", "@service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q):", "result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot be greater than", "screen as a background\"), (\"video\", \"`video <url>`: Play a video\"), ] #TODO: splash", "queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for", "if m is not None else 0 for m in matches] if h", "be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def", "cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise", ": text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except", "yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title'", "= \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h, m, s) return h *", "@staticmethod def parse_duration(dstr): # Parse ISO 8601 duration strings: PT#M#S hms_str = dstr.strip()", "commands={ 'do': do, 'suggest': suggest, } suggest_commands = [ (\"vol up\", \"Raise the", "bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s - thanks!') @service.coroutine def cmd_help(self,q): raise", "that is currently playing\"), (\"pop\", \"Remove the last item on the queue\"), (\"bump\",", "output raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos = yield self.youtube_search(q) results =", "= yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\")", "- Remove the top video pop|undo|oops - Remove the bottom video bump -", "Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest, } suggest_commands = [ (\"vol up\",", "queue to top of the queue and play it\"), (\"say\", \"`say <quote>`: Say", "re import signal import socket import subprocess import 
tornado.httpclient import traceback import urllib", "self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod)))", "q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = {", "assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args})", "text.strip() results = [] if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr =", "import urllib import urllib2 import shmooze.lib.packet as packet import shmooze.lib.service as service import", "= yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json = json.loads(video_results.body) #output = []", "def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with", "result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise", "0, hms_str h, m, s = [int(m.strip(\"HMS\")) if m is not None else", "'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued", "(.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame): print", "up\", \"Raise the volume\"), (\"vol 
down\", \"Lower the volume\"), (\"skip\", \"Remove the current", "(r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say", "import signal import socket import subprocess import tornado.httpclient import traceback import urllib import", "swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] +", "self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod)))", "settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def", "the volume\"), (\"skip\", \"Remove the current item on the queue that is currently", "import json import os import random import re import signal import socket import", "top q|queue - List the queue cur|current - Give the current item playing", "YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO results =", "#TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top),", "pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': #", "raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) 
@service.coroutine def cmd_rm_bot(self,q):", "See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text = \" \".join(selection)", "len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params})", "url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine", "def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data", "ISO 8601 duration strings: PT#M#S hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups()", "not None else 0 for m in matches] if h > 0: human_str", "@service.coroutine def wildcard_suggest(self, text): text = text.strip() results = [] if text.startswith(\"http:\"): rs", "sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions})", "'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\"", "'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued 
fortune.\") def pretty(self,mod):", "queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result =", "not result or not result[0]: raise Exception('No Youtube results found.') url = result[0][\"url\"]", "Remove the top video pop|undo|oops - Remove the bottom video bump - Move", "item on the queue to top of the queue and play it\"), (\"say\",", "the args dict for the first youtube result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\"", "Exception(\"Volume cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol))", "queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed", "'body' : text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1])", "+ fortune_args) data = { 'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none',", "from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'],", "s) else: human_str = \"{1}:{2:02d}\".format(h, m, s) return h * 360 + m", "url, \"help\": \"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text", "results found.') url = result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title))", "raise service.Return(u'Submitted 
bug: %s - thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand:", "\"{1}:{2:02d}\".format(h, m, s) return h * 360 + m * 60 + s,", "#\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def youtube_suggest(self, q):", "raise service.Return(u\"No github account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data,", "(regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups()) raise service.Return(result) raise", "elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot be", "@service.coroutine def youtube_search(self,q): if q in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print", "List the queue cur|current - Give the current item playing bug - Submit", "#video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json =", "\"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise", "#'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, }", "(\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a quote from the fortune database\"), (\"image\",", "= urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json = json.loads(video_results.body)", "self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield 
self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.')", "- Give the current item playing bug - Submit a bug report Anything", "queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\")", "for m in matches] if h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s)", "settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr)", "as settings import urllib2 from bs4 import BeautifulSoup from urlparse import urlparse class", "@service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions = [] for sc, sc_help in", "queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def", "musicaz00) raise service.Return(u'Submitted bug: %s - thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I", "- List the queue cur|current - Give the current item playing bug -", "\"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\":", "yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear words according to", "] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top),", "vol_host='localhost' vol_port=settings.ports[\"vol\"] 
pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP started.\"", "Give the current item playing bug - Submit a bug report Anything else", "= re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return 0, hms_str h, m, s =", "\"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\" +", "t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if", "Submit a bug report Anything else - Queue Youtube video\"\"\") commands={ 'do': do,", "\"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr) #", "urllib import urllib2 import shmooze.lib.packet as packet import shmooze.lib.service as service import shmooze.settings", "selection = random.sample(words, 5) text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.')", "bunch\"), (\"quote\", \"Display a quote from the fortune database\"), (\"image\", \"`image <url>`: Display", "\"Remove the current item on the queue that is currently playing\"), (\"pop\", \"Remove", "queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print", "vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP started.\" self.youtube_cache", 
"\"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text = \" \".join(selection) yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued", "u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via", "if not result or not result[0]: raise Exception('No Youtube results found.') url =", "(r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump),", "def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result or not result[0]: raise Exception('No Youtube", "- Change volume stop|stfu|skip|next - Remove the top video pop|undo|oops - Remove the", "(r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw),", "else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol})", "nlp = NLP() def shutdown_handler(signum,frame): print print \"Received signal, attempting graceful shutdown...\" service.ioloop.add_callback_from_signal(nlp.shutdown)", "(r'^q$',cmd_queue), (r'^queue$',cmd_queue), 
(r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug),", "len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only one thing on the", "yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json = json.loads(video_results.body) #output = [] #for", "} suggest_commands = [ (\"vol up\", \"Raise the volume\"), (\"vol down\", \"Lower the", "= urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, 'html.parser') video_ids = [] output", "self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions = []", "def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1: raise", "service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title'])", "\"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url", "# Parse ISO 8601 duration strings: PT#M#S hms_str = dstr.strip() try: matches =", "the queue\"), (\"bump\", \"Move the last item on the queue to top of", "(r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw),", "\"action\": url, \"help\": \"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text):", "\", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query response = 
urllib2.urlopen(url) html = response.read()", "import random import re import signal import socket import subprocess import tornado.httpclient import", "\"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise", "yahoo chat. # See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text", "matches] if h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str =", "Anything else - Queue Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest, } suggest_commands", "Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only one thing on the queue!\") old_uids=[mod['uid']", "service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\") if assert_success: raise service.Return(packet.assert_success(result))", "(socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine", "output = [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\":", "service.Return(output) @service.coroutine def youtube_suggest(self, q): videos = yield self.youtube_search(q) results = [] for", "@service.coroutine def cmd_queue(self,q): 
queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\") result =", "yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise", "bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account configured in settings.json\") handler", "stripped_message = message.strip() suggestions = [] for sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message):", "\"Display a quote from the fortune database\"), (\"image\", \"`image <url>`: Display an image", "vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume cannot", "t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t) @service.coroutine def cmd_bug(self,q,text): bug_url", "is empty!\") if len(queue)==1: raise Exception(\"Only one thing on the queue!\") old_uids=[mod['uid'] for", "cannot be greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine", "mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot)))", "the bottom video bump - Move the bottom video to the top q|queue", "service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0:", "bottom video to the top q|queue - List the queue cur|current - Give", "os import random 
import re import signal import socket import subprocess import tornado.httpclient", "as packet import shmooze.lib.service as service import shmooze.settings as settings import urllib2 from", "url = \"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url) html = response.read() soup =", "the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result or not result[0]:", "NLP() def shutdown_handler(signum,frame): print print \"Received signal, attempting graceful shutdown...\" service.ioloop.add_callback_from_signal(nlp.shutdown) signal.signal(signal.SIGTERM, shutdown_handler)", "Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop()", "# \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\"", "raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos = yield self.youtube_search(q) results = []", "raise service.Return(result) raise Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message})", "@service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield", "= '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield", "if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) }) rs", "human_str @service.coroutine def youtube_search(self,q): if q in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q])", "as service import shmooze.settings as settings import urllib2 from bs4 import BeautifulSoup from", "password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account configured in settings.json\")", "import shmooze.lib.service as service import shmooze.settings as settings import urllib2 from bs4 import", "vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume", "= [int(m.strip(\"HMS\")) if m is not None else 0 for m in matches]", "not result[0]: raise Exception('No Youtube results found.') url = result[0][\"url\"] title = result[0][\"title\"]", "report Anything else - Queue Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest, }", "settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s", "yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #}", "} yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and", "m in matches] if 
h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else:", "database\"), (\"image\", \"`image <url>`: Display an image on the screen as a background\"),", "\"type\": \"video\", \"max-results\": 5, \"q\": q, } query = q.replace(\" \", \"+\") url", "or not result[0]: raise Exception('No Youtube results found.') url = result[0][\"url\"] title =", "up|down - Change volume stop|stfu|skip|next - Remove the top video pop|undo|oops - Remove", "@service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating", "= yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip() suggestions", "self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in", "@service.coroutine def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\" bug_data", "import re import signal import socket import subprocess import tornado.httpclient import traceback import", "miss\" http_client = tornado.httpclient.AsyncHTTPClient() # Return the args dict for the first youtube", "\"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title': text, 'body' : text + suffix})", "result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise", "bug report Anything else - Queue Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest,", "yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield 
self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued", "yr = yield self.youtube_suggest(text) results.extend(yr) raise service.Return(results) @service.coroutine def suggest(self,message): stripped_message = message.strip()", "def youtube_search(self,q): if q in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache", "the queue to top of the queue and play it\"), (\"say\", \"`say <quote>`:", "\"Play video from YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine def url_suggest(self, url):", "self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue", "#'renderer': 'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") def", "result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with queue.\") if assert_success:", "Exception('No Youtube results found.') url = result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise", "for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the", "settings.get(\"fortune_args\", ['-s']) fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type': 'text', 'args':", "(\"image\", \"`image <url>`: Display an image on the screen as a background\"), (\"video\",", "* 360 + m * 60 + s, human_str @service.coroutine def youtube_search(self,q): if", "} } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\") 
def pretty(self,mod): t=mod['type'] if t=='youtube'", "import subprocess import tornado.httpclient import traceback import urllib import urllib2 import shmooze.lib.packet as", "message.strip() suggestions = [] for sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\":", "\"help\": \"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text =", "(r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear),", "# See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words, 5) text = \"", "t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return", "service.\" bug_data = json.dumps({'title': text, 'body' : text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()", "'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration':", "# \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine", "sr = { # 
\"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], #", "an image on the screen as a background\"), (\"video\", \"`video <url>`: Play a", "in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from", "len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def", "service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def", "def pretty(self,mod): t=mod['type'] if t=='youtube' and 'title' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid':", "bottom video bump - Move the bottom video to the top q|queue -", "the queue cur|current - Give the current item playing bug - Submit a", "url): #TODO results = [{ \"title\": url, \"action\": url, \"help\": \"\", \"match\": len(url)", "(r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video", "(r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame): print print", "{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if", "current item on the queue that is currently playing\"), (\"pop\", \"Remove the last", "mod_bot=queue[-1] 
new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw):", "\"title\": url, \"action\": url, \"help\": \"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine def", "vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)}", "(.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def", "the bottom video to the top q|queue - List the queue cur|current -", "with queue.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result", "self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only one thing", "self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume", "urllib2 from bs4 import BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"]", "raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except", "last item on the queue to top of the queue and play it\"),", "title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise 
service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":", "according to yahoo chat. # See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection = random.sample(words,", "playing\"), (\"pop\", \"Remove the last item on the queue\"), (\"bump\", \"Move the last", "- Queue Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest, } suggest_commands = [", "def do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield", "the queue and play it\"), (\"say\", \"`say <quote>`: Say a quote and display", "\"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\":", "queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed", "(\"vol down\", \"Lower the volume\"), (\"skip\", \"Remove the current item on the queue", "the last item on the queue to top of the queue and play", "from bs4 import BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service): port=settings.ports[\"nlp\"] queue_host='localhost'", "= urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], 
settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account", "[] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'], \"url\":", "(r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP() def shutdown_handler(signum,frame):", "volume\"), (\"vol down\", \"Lower the volume\"), (\"skip\", \"Remove the current item on the", "Say a quote and display it on the screen\"), (\"fuck\", \"Swear a bunch\"),", "on the queue to top of the queue and play it\"), (\"say\", \"`say", "empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if", "yield self.youtube_search(q) results = [] for v in videos: results.append({ \"title\": u\"{0[title]} -", "suggest_commands = [ (\"vol up\", \"Raise the volume\"), (\"vol down\", \"Lower the volume\"),", "item playing bug - Submit a bug report Anything else - Queue Youtube", "subprocess.check_output(['/usr/games/fortune'] + fortune_args) data = { 'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor':", "AttributeError: raise service.Return(u\"No github account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url,", "self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not", "import shmooze.settings as settings import urllib2 from bs4 import BeautifulSoup from urlparse import", "len(queue)==0: raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)])", "#} #video_form_data = urllib.urlencode(video_data) #video_results = yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json", "{ \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\",", "the queue that is currently playing\"), (\"pop\", \"Remove the last item on the", "result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key,", "in self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command", "raise Exception('No Youtube results found.') url = result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}})", "of the queue and play it\"), (\"say\", \"`say <quote>`: Say a quote and", "for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0:", "raise Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only one thing on the queue!\")", "youtube result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\":", "empty!\") if len(queue)==1: raise Exception(\"Only one thing on the queue!\") old_uids=[mod['uid'] for mod", "http_client = tornado.httpclient.AsyncHTTPClient() # Return the args dict for the first youtube result", "\"max-results\": 5, \"q\": q, } query = q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\"", "len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise 
service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try:", "self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.')", "service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\",", "= json.loads(video_results.body) #output = [] #for yi in video_json['items']: # sr = {", "for v in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play", "text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError:", "queue\"), (\"bump\", \"Move the last item on the queue to top of the", "top video pop|undo|oops - Remove the bottom video bump - Move the bottom", "<url>`: Display an image on the screen as a background\"), (\"video\", \"`video <url>`:", "import urllib2 from bs4 import BeautifulSoup from urlparse import urlparse class NLP(service.JSONCommandProcessor, service.Service):", "= [] if text.startswith(\"http:\"): rs = yield self.url_suggest(text) results.extend(rs) yr = yield self.youtube_suggest(text)", "urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, 'html.parser') video_ids = [] output =", "top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result or not result[0]: raise", "= [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'],", "a bug report Anything else - Queue 
Youtube video\"\"\") commands={ 'do': do, 'suggest':", "for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups()) raise service.Return(result)", "video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\",", "\"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\": (1,1)} output.append(sr)", "* 60 + s, human_str @service.coroutine def youtube_search(self,q): if q in self.youtube_cache: print", "except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise", "self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued swearing.') @service.coroutine def cmd_fortune(self, q): fortune_args = settings.get(\"fortune_args\", ['-s']) fortune_text", "'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } } yield", "(r'^say (.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt),", "= yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise Exception(\"Error communicating with volume control.\") if assert_success:", "cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1: raise Exception(\"Only", "mod['parameters']: return 
u'\"{0}\"'.format(mod['parameters']['title']) #if t=='netvid': # return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in", "bug_data = json.dumps({'title': text, 'body' : text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try:", "service import shmooze.settings as settings import urllib2 from bs4 import BeautifulSoup from urlparse", "http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json = json.loads(video_results.body) #output = [] #for yi", "raise service.Return({'suggestions':suggestions}) @service.coroutine def queue_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.queue_host,self.queue_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise", "quote from the fortune database\"), (\"image\", \"`image <url>`: Display an image on the", "if h > 0: human_str = \"{0}:{1:02d}:{2:02d}\".format(h, m, s) else: human_str = \"{1}:{2:02d}\".format(h,", "- Set volume vol up|down - Change volume stop|stfu|skip|next - Remove the top", "[int(m.strip(\"HMS\")) if m is not None else 0 for m in matches] if", "vol - Get volume vol [num] - Set volume vol up|down - Change", "hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return 0,", "- Submit a bug report Anything else - Queue Youtube video\"\"\") commands={ 'do':", "json.loads(video_results.body) #output = [] #for yi in video_json['items']: # sr = { #", "h, m, s = [int(m.strip(\"HMS\")) if m is not None else 0 for", "self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued", "stop|stfu|skip|next - Remove the top video pop|undo|oops - Remove the bottom video bump", "one thing on the queue!\") old_uids=[mod['uid'] 
for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield", "'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\", data) raise service.Return(u\"Queued fortune.\")", "suggestions = [] for sc, sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc,", "service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue", "not recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine", "last item on the queue\"), (\"bump\", \"Move the last item on the queue", "cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine", "= [{ \"title\": url, \"action\": url, \"help\": \"\", \"match\": len(url) }] yield service.Return(results)", "service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0])", "def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? 
- This vol - Get volume", "socket import subprocess import tornado.httpclient import traceback import urllib import urllib2 import shmooze.lib.packet", "service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError):", "\"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\": 5, \"q\": q, } query =", "to the top q|queue - List the queue cur|current - Give the current", "video from YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine def url_suggest(self, url): #TODO", "(\"say\", \"`say <quote>`: Say a quote and display it on the screen\"), (\"fuck\",", "vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\":", "if q in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client", "\"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text = text.strip()", "= { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\", \"max-results\":", "dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return 0, hms_str h,", "\"+\") url = \"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url) html = response.read() soup", "service.Return(u'Queued text.') @service.coroutine def cmd_say(self,q,text): yield self.queue_cmd(\"add\",{\"type\":\"text\",\"args\":{\"text\":text}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q):", "else - Queue 
Youtube video\"\"\") commands={ 'do': do, 'suggest': suggest, } suggest_commands =", "self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help, \"match\": len(stripped_message) })", "{0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1]", "Play a video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol),", "def shutdown_handler(signum,frame): print print \"Received signal, attempting graceful shutdown...\" service.ioloop.add_callback_from_signal(nlp.shutdown) signal.signal(signal.SIGTERM, shutdown_handler) signal.signal(signal.SIGINT,", "u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video from YouTube\", \"match\": 0, })", "in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\")", "'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph',", "urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1]) except AttributeError: raise service.Return(u\"No github account configured", "if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip() for (regex,func) in", "= json.dumps({'title': text, 'body' : text + suffix}) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() try: password_mgr.add_password(None,", "BeautifulSoup(html, 
'html.parser') video_ids = [] output = [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:])", "raise Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self):", "self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol) if vol>100: raise Exception(\"Volume", "#video_results = yield http_client.fetch(youtube_video_url + \"?\" + video_form_data) #video_json = json.loads(video_results.body) #output =", "videos = yield self.youtube_search(q) results = [] for v in videos: results.append({ \"title\":", "'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen':", "(.+)$',cmd_say), (r'^image (.+)$',cmd_image), (r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ]", "github account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00)", "def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[-1] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]})", "\"?\" + video_form_data) #video_json = json.loads(video_results.body) #output = [] #for yi in video_json['items']:", "yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"], #\"duration\": self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output)", "+ query response = urllib2.urlopen(url) html = response.read() soup = 
BeautifulSoup(html, 'html.parser') video_ids", "queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def __init__(self): print \"NLP", "self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q):", "cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"Queue is empty.\") result = '\\n'.join([u\"{0}. {1}\".format(n+1,self.pretty(mod))", "{ # \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"],", "'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } } yield self.queue_cmd(\"add\",", "shmooze.settings as settings import urllib2 from bs4 import BeautifulSoup from urlparse import urlparse", "search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\", \"safesearch\": \"none\", \"type\": \"video\",", "\"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, 'html.parser')", "url_suggest(self, url): #TODO results = [{ \"title\": url, \"action\": url, \"help\": \"\", \"match\":", "data = { 'type': 'text', 'args': { 'text': fortune_text, #'screen_preprocessor': 'none', 'speech_preprocessor': 'pronounce_fortune',", "= \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title': text, 'body'", "\"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # 
\"thumbnail\": yi[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"], # \"publish_time\": yi[\"snippet\"][\"publishedAt\"], # \"views\": yi[\"statistics\"][\"viewCount\"],", "%s - thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? - This", "= urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s - thanks!')", "(r'^youtube (.+)$',cmd_youtube_raw), (r'^video (.+)$',cmd_youtube_raw), (r'^bug (.+)$',cmd_bug), (r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image), (r'(https?://.+)',cmd_youtube_raw), (r'^(.+)$',cmd_yt), ] nlp = NLP()", "# \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data = urllib.urlencode(video_data) #video_results = yield", "playing bug - Submit a bug report Anything else - Queue Youtube video\"\"\")", "raise Exception(\"Only one thing on the queue!\") old_uids=[mod['uid'] for mod in queue] mod_bot=queue[-1]", "Swear words according to yahoo chat. 
# See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words = \"ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting\".split(\",\") selection", "quote and display it on the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display", "raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine", "it on the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a quote from", "in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client = tornado.httpclient.AsyncHTTPClient()", "100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\")", "raise service.Return(u'Queued text.') @service.coroutine def cmd_swear(self,q): # Swear words according to yahoo chat.", "q|queue - List the queue cur|current - Give the current item playing bug", "(r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), (r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune), (r'^q$',cmd_queue), (r'^queue$',cmd_queue), (r'^cur(?:rent)?$',cmd_current), (r'^say (.+)$',cmd_say),", "\"NLP started.\" self.youtube_cache = {} super(NLP, self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO", "and display it on the screen\"), (\"fuck\", \"Swear a bunch\"), (\"quote\", \"Display a", "cmd_swear(self,q): # Swear words according to yahoo chat. 
# See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html words =", "service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def", "self.parse_duration(yi[\"contentDetails\"][\"duration\"]), #} #output.append(sr) self.youtube_cache[q] = output raise service.Return(output) @service.coroutine def youtube_suggest(self, q): videos", "{ # \"part\": \"contentDetails,snippet,statistics\", # \"key\": self.youtube_api_key, # \"id\": \",\".join(video_ids), #} #video_form_data =", "self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield", "- Move the bottom video to the top q|queue - List the queue", "duration strings: PT#M#S hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print", "v in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\": \"Play video", "urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s - thanks!') @service.coroutine", "@service.coroutine def cmd_bump(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise Exception(\"Queue is empty!\") if len(queue)==1:", "raise service.Return(result) @service.coroutine def cmd_current(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise service.Return(\"(Nothing)\") result =", "greater than 100\") yield self.vol_cmd(\"set_vol\",{\"vol\":vol}) raise service.Return(\"Volume set to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q):", "video_ids = [] output = [] for vid in 
soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr", "<url>`: Play a video\"), ] #TODO: splash nlp_commands=[ (r'^help$',cmd_help), (r'^$',cmd_help), (r'^\\?$',cmd_help), (r'^vol (\\d+|up|down)$',cmd_set_vol),", "q in self.youtube_cache: print \"cache hit\" raise service.Return(self.youtube_cache[q]) print \"cache miss\" http_client =", "is not None else 0 for m in matches] if h > 0:", "it\"), (\"say\", \"`say <quote>`: Say a quote and display it on the screen\"),", "to {0}\".format(vol)) @service.coroutine def cmd_get_vol(self,q): result=yield self.vol_cmd(\"get_vol\") raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine", "queue!\") old_uids=[mod['uid'] for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Bumped {0}", "to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result or not", "yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued", "def cmd_youtube_raw(self,q,url): yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\": url}}) raise service.Return(u'Queued text.') @service.coroutine def cmd_image(self,q,url): yield self.queue_cmd(\"set_bg\",{\"type\":\"image\",\"args\":{\"url\":", "and play it\"), (\"say\", \"`say <quote>`: Say a quote and display it on", "raise service.Return(result) @service.coroutine def vol_cmd(self,cmd,args={},assert_success=True): try: result = yield service.json_query(self.vol_host,self.vol_port,{\"cmd\":cmd,\"args\":args}) except (socket.error,service.TimeoutError): raise", "in 
soup.findAll(attrs={'class':'yt-uix-tile-link'}): video_ids.append(vid['href'][9:]) print(vid) sr = { \"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'],", "the first youtube result for 'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\":", "@service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result or not result[0]: raise Exception('No", "cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0) else: vol=int(vol)", "5, \"q\": q, } query = q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" +", "\"video\", \"max-results\": 5, \"q\": q, } query = q.replace(\" \", \"+\") url =", "'match' youtube_search_url = \"https://www.googleapis.com/youtube/v3/search\" search_data = { \"part\": \"snippet\", \"key\": self.youtube_api_key, \"order\": \"relevance\",", "@service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif vol=='down': result=yield self.vol_cmd(\"get_vol\") vol=max(result['vol']-5,0)", "service.Return(u\"Bumped {0} to the top\".format(self.pretty(mod_bot))) @service.coroutine def cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result", "cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\" bug_data = json.dumps({'title':", "m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups()) raise service.Return(result) raise Exception(\"Command not recognized.\")", "(\\d+|up|down)$',cmd_set_vol), (r'^vol$',cmd_get_vol), (r'^stop$',cmd_rm_top), (r'^stfu$',cmd_rm_top), (r'^skip$',cmd_rm_top), (r'^next$',cmd_rm_top), (r'^pop$',cmd_rm_bot), (r'^undo$',cmd_rm_bot), 
(r'^oops$',cmd_rm_bot), (r'^bump$',cmd_bump), (r'^fuck$',cmd_swear), (r'^fortune$',cmd_fortune), (r'^quote$',cmd_fortune),", "raise service.Return(\"Volume is {0}\".format(result.get(\"vol\", \"unknown\"))) @service.coroutine def cmd_queue(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0: raise", "self).__init__() @staticmethod def parse_duration(dstr): # Parse ISO 8601 duration strings: PT#M#S hms_str =", "#output = [] #for yi in video_json['items']: # sr = { # \"video_id\":", "Exception(\"Error communicating with volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def", "bug: %s - thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands I understand: help|? -", "service.Return(u\"No github account configured in settings.json\") handler = urllib2.HTTPBasicAuthHandler(password_mgr) #TODO request(bug_url, bug_data, auth=(musicazoo-bugs,", "= BeautifulSoup(html, 'html.parser') video_ids = [] output = [] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):", "html = response.read() soup = BeautifulSoup(html, 'html.parser') video_ids = [] output = []", "auth=(musicazoo-bugs, musicaz00) raise service.Return(u'Submitted bug: %s - thanks!') @service.coroutine def cmd_help(self,q): raise service.Return(\"\"\"Commands", "sc_help in self.suggest_commands: if sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\": sc, \"help\": sc_help, \"match\":", "# return u'{0}'.format(mod['parameters']['short_description']) if t=='text' and 'text' in mod['parameters']: return u'\"{0}\"'.format(mod['parameters']['text']) return u'({0})'.format(t)", "def cmd_bug(self,q,text): bug_url = \"https://api.github.com/repos/zbanks/musicazoo/issues\" suffix = \"\\n\\nSubmitted via NLP service.\" bug_data =", "results = [{ \"title\": url, \"action\": url, \"help\": \"\", \"match\": len(url) }] yield", 
"url, \"action\": url, \"help\": \"\", \"match\": len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self,", "sc_help, \"match\": len(stripped_message) }) rs = yield self.wildcard_suggest(message) suggestions.extend(rs) raise service.Return({'suggestions':suggestions}) @service.coroutine def", "if len(queue)==0: raise service.Return(\"(Nothing)\") result = self.pretty(queue[0]) raise service.Return(result) @service.coroutine def cmd_rm_top(self,q): queue=yield", "'none', 'speech_preprocessor': 'pronounce_fortune', 'text2speech': 'google', 'text2screen': 'paragraph', #'renderer': 'mono_paragraph', 'duration': 5, } }", "suggest, } suggest_commands = [ (\"vol up\", \"Raise the volume\"), (\"vol down\", \"Lower", "with volume control.\") if assert_success: raise service.Return(packet.assert_success(result)) raise service.Return(result) @service.coroutine def do(self,message): message=message.strip()", "len(url) }] yield service.Return(results) @service.coroutine def wildcard_suggest(self, text): text = text.strip() results =", "if len(queue)==0: raise Exception(\"Queue is empty!\") mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine", "] nlp = NLP() def shutdown_handler(signum,frame): print print \"Received signal, attempting graceful shutdown...\"", "8601 duration strings: PT#M#S hms_str = dstr.strip() try: matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except:", "'+message}) def shutdown(self): service.ioloop.stop() @service.coroutine def cmd_set_vol(self,q,vol): if vol=='up': result=yield self.vol_cmd(\"get_vol\") vol=min(result['vol']+5,100) elif", "(\"pop\", \"Remove the last item on the queue\"), (\"bump\", \"Move the last item", "v['url'], \"help\": \"Play video from YouTube\", \"match\": 0, }) raise service.Return(results) @service.coroutine def", "= [] for sc, sc_help in self.suggest_commands: if 
sc.startswith(stripped_message): suggestions.append({ \"title\": sc, \"action\":", "matches = re.match(r\"PT(\\d+H)?(\\d{1,2}M)?(\\d{1,2}S)\", hms_str).groups() except: print hms_str return 0, hms_str h, m, s", "h * 360 + m * 60 + s, human_str @service.coroutine def youtube_search(self,q):", "service.Service): port=settings.ports[\"nlp\"] queue_host='localhost' queue_port=settings.ports[\"queue\"] vol_host='localhost' vol_port=settings.ports[\"vol\"] pretty_params={'youtube':['title'], 'text': ['text']} youtube_api_key = settings.youtube_api_key def", "response.read() soup = BeautifulSoup(html, 'html.parser') video_ids = [] output = [] for vid", "= { # \"video_id\": yi[\"id\"], # \"url\": \"http://www.youtube.com/watch?v={0}\".format(yi[\"id\"]), # \"title\": yi[\"snippet\"][\"title\"], # \"thumbnail\":", "Get volume vol [num] - Set volume vol up|down - Change volume stop|stfu|skip|next", "[ (\"vol up\", \"Raise the volume\"), (\"vol down\", \"Lower the volume\"), (\"skip\", \"Remove", "result[0][\"url\"] title = result[0][\"title\"] yield self.queue_cmd(\"add\",{\"type\":\"youtube\",\"args\":{\"url\":url}}) raise service.Return(u'Queued \"{0}\"'.format(title)) @service.coroutine def cmd_youtube_raw(self,q,url): yield", "service.Return(result) raise Exception(\"Command not recognized.\") #result = yield self.queue_cmd(\"queue\") raise service.Return({'message':'Did '+message}) def", "[] for v in videos: results.append({ \"title\": u\"{0[title]} - [{0[duration][1]}]\".format(v), \"action\": v['url'], \"help\":", "text.') @service.coroutine def cmd_swear(self,q): # Swear words according to yahoo chat. # See:", "the top q|queue - List the queue cur|current - Give the current item", "result = '\\n'.join([u\"{0}. 
{1}\".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)]) raise service.Return(result) @service.coroutine def cmd_current(self,q):", "on the queue!\") old_uids=[mod['uid'] for mod in queue] mod_bot=queue[-1] new_uids=old_uids[-1:]+old_uids[0:-1] yield self.queue_cmd(\"mv\",{\"uids\":[mod['uid']]}) raise", "= q.replace(\" \", \"+\") url = \"https://www.youtube.com/results?search_query=\" + query response = urllib2.urlopen(url) html", "cmd_yt(self,q,kw): result=yield self.youtube_search(kw) if not result or not result[0]: raise Exception('No Youtube results", "mod=queue[0] yield self.queue_cmd(\"rm\",{\"uids\":[mod['uid']]}) raise service.Return(u\"Removed {0}\".format(self.pretty(mod))) @service.coroutine def cmd_rm_bot(self,q): queue=yield self.queue_cmd(\"queue\",{\"parameters\":self.pretty_params}) if len(queue)==0:", "do, 'suggest': suggest, } suggest_commands = [ (\"vol up\", \"Raise the volume\"), (\"vol", "\"video_id\": vid['href'], \"url\": \"https://www.youtube.com/watch?v={0}\".format(vid['href'][9:]), \"title\": vid['title'], \"thumbnail\": \"no\", \"publish_time\": \"no\", \"views\": \"no\", \"duration\":", "do(self,message): message=message.strip() for (regex,func) in self.nlp_commands: m=re.match(regex,message,re.I) if m: result = yield func(self,message,*m.groups())" ]
[ "= ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service =", "fileId = spreadsheetId, body = { 'type': 'user', 'role': 'writer', 'emailAddress': mail, },", "'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30, }, }, }", "'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4',", "30, }, }, } for i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId =", "body = { 'properties': { 'title': name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties':", "'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name,", "'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body = { 'properties':", "= service.spreadsheets().create( body = { 'properties': { 'title': name, 'locale': 'ru_RU', }, 'sheets':", "apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body = { 'type': 'user', 'role':", "= apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body =", "http=httpAuth) def create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body = { 'properties': {", ").execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId =", "body = { 'type': 'user', 'role': 'writer', 'emailAddress': mail, }, fields = 'id'", "= 
spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body", "httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets',", "} for i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService", "{ 'properties': { 'title': name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType':", "service.spreadsheets().create( body = { 'properties': { 'title': name, 'locale': 'ru_RU', }, 'sheets': [{", "= spreadsheetId, body = { 'type': 'user', 'role': 'writer', 'emailAddress': mail, }, fields", "in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth)", "driveService.permissions().create( fileId = spreadsheetId, body = { 'type': 'user', 'role': 'writer', 'emailAddress': mail,", "}, 'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': {", "def create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body = { 'properties': { 'title':", "'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount':", "= credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet =", "sheet in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = 
apiclient.discovery.build('drive', 'v3',", "'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties':", "oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth", "'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30, }, }, } for i,", "enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create(", "apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body = {", "i, 'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30, }, }, } for", "from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], )", "], ) httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets,", "[{ 'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount': 100,", "{ 'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30,", "functionality for the API \"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials", "'locale': 'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title': sheet,", "'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30, },", "'v3', http=httpAuth) 
driveService.permissions().create( fileId = spreadsheetId, body = { 'type': 'user', 'role': 'writer',", "{ 'title': name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId':", "Google Documents functionality for the API \"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account", "credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet = service.spreadsheets().create(", "\"\"\" Google Documents functionality for the API \"\"\" import httplib2 import apiclient.discovery from", "httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet", "'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30, }, },", "}, } for i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\")", "i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive',", "import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth =", "the API \"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials =", "apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ],", "spreadsheet['spreadsheetId'] 
print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body =", "'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def", "'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title': sheet, 'gridProperties': { 'rowCount':", "= { 'properties': { 'title': name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties': {", "http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body = { 'type': 'user', 'role': 'writer', 'emailAddress':", "'title': name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId': i,", "service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body", "import httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [", "'columnCount': 30, }, }, } for i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId", "Documents functionality for the API \"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account import", "}, }, } for i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId']", "{ 'rowCount': 100, 'columnCount': 30, }, }, } for i, sheet in enumerate(sheets)],", "credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service", "ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 
'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets',", "= { 'type': 'user', 'role': 'writer', 'emailAddress': mail, }, fields = 'id' ).execute()", "name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType': 'GRID', 'sheetId': i, 'title':", "spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId,", "API \"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name(", "100, 'columnCount': 30, }, }, } for i, sheet in enumerate(sheets)], }, ).execute()", "import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive',", "}, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId", "driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body = { 'type':", "ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json', [ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http())", "spreadsheetId, body = { 'type': 'user', 'role': 'writer', 'emailAddress': mail, }, fields =", "for i, sheet in enumerate(sheets)], }, ).execute() spreadsheetId = spreadsheet['spreadsheetId'] 
print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService =", "mail): spreadsheet = service.spreadsheets().create( body = { 'properties': { 'title': name, 'locale': 'ru_RU',", "= apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body = { 'type': 'user',", "create(name, sheets, mail): spreadsheet = service.spreadsheets().create( body = { 'properties': { 'title': name,", "sheets, mail): spreadsheet = service.spreadsheets().create( body = { 'properties': { 'title': name, 'locale':", "'properties': { 'title': name, 'locale': 'ru_RU', }, 'sheets': [{ 'properties': { 'sheetType': 'GRID',", "'gridProperties': { 'rowCount': 100, 'columnCount': 30, }, }, } for i, sheet in", "print(f\"https://docs.google.com/spreadsheets/d/{spreadsheetId}\") driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth) driveService.permissions().create( fileId = spreadsheetId, body = {", "for the API \"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials", "'rowCount': 100, 'columnCount': 30, }, }, } for i, sheet in enumerate(sheets)], },", "\"\"\" import httplib2 import apiclient.discovery from oauth2client.service_account import ServiceAccountCredentials credentials = ServiceAccountCredentials.from_json_keyfile_name( 'credentials.json',", "[ 'https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive', ], ) httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth)", "sheet, 'gridProperties': { 'rowCount': 100, 'columnCount': 30, }, }, } for i, sheet", ") httpAuth = credentials.authorize(httplib2.Http()) service = apiclient.discovery.build('sheets', 'v4', http=httpAuth) def create(name, sheets, mail):", "spreadsheet = service.spreadsheets().create( body = { 'properties': { 'title': name, 'locale': 'ru_RU', }," ]
[ "settings['bridge']) return \"Success\", 200 def main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str)", "return if len(addrs) == 0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return settings['localIP']", "if len(addrs) == 0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return settings['localIP'] =", "= ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface) if", "parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is None: logging.error(\"No such interface: {}\".format(args.interface)) return", "type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is None: logging.error(\"No such", "if addrs is None: logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs) == 0:", "request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200 def main():", "@app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge'])", "connect(): address = request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\",", "{}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200 def main(): global settings parser =", "{} has no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge app.run(host=addrs[0])", "Flask(__name__) settings = {} @app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect request from", "addrs = 
addressesForInterface(args.interface) if addrs is None: logging.error(\"No such interface: {}\".format(args.interface)) return if", "{} @app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'],", "main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args()", "0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] =", "def connect(): address = request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return", "app = Flask(__name__) settings = {} @app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect", "import startTunnel, stopTunnel, addressesForInterface from argparse import ArgumentParser import logging app = Flask(__name__)", "no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge app.run(host=addrs[0]) if __name__", "IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge app.run(host=addrs[0]) if __name__ ==", "return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge app.run(host=addrs[0]) if __name__ == '__main__': main()", "= addressesForInterface(args.interface) if addrs is None: logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs)", "logging app = Flask(__name__) settings = {} @app.route(\"/connect\") def connect(): address = request.remote_addr", "addrs is None: logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface", "import request from util import startTunnel, stopTunnel, addressesForInterface from argparse import 
ArgumentParser import", "None: logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface {} has", "type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is None:", "= request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200 def", "import Flask from flask import request from util import startTunnel, stopTunnel, addressesForInterface from", "Flask from flask import request from util import startTunnel, stopTunnel, addressesForInterface from argparse", "stopTunnel, addressesForInterface from argparse import ArgumentParser import logging app = Flask(__name__) settings =", "= Flask(__name__) settings = {} @app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect request", "logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface {} has no", "from util import startTunnel, stopTunnel, addressesForInterface from argparse import ArgumentParser import logging app", "global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs", "from argparse import ArgumentParser import logging app = Flask(__name__) settings = {} @app.route(\"/connect\")", "200 def main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args", "ArgumentParser import logging app = Flask(__name__) settings = {} @app.route(\"/connect\") def connect(): address", "flask import request from util import startTunnel, stopTunnel, addressesForInterface from argparse import ArgumentParser", "logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], 
settings['bridge']) return \"Success\", 200 def main(): global", "parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface)", "is None: logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface {}", "settings['localIP'], settings['bridge']) return \"Success\", 200 def main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\",", "len(addrs) == 0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0]", "flask import Flask from flask import request from util import startTunnel, stopTunnel, addressesForInterface", "address = request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200", "from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200 def main(): global settings parser", "interface: {}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface))", "def main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args =", "settings = {} @app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect request from {}\".format(address))", "parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is", "startTunnel, stopTunnel, addressesForInterface from argparse import ArgumentParser import logging app = Flask(__name__) settings", "addressesForInterface from argparse import ArgumentParser import logging app = Flask(__name__) settings = {}", 
"{}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return", "ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface) if addrs", "= parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is None: logging.error(\"No such interface: {}\".format(args.interface))", "logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge", "\"Success\", 200 def main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str)", "== 0: logging.error(\"Interface {} has no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge']", "import logging app = Flask(__name__) settings = {} @app.route(\"/connect\") def connect(): address =", "has no IP4 address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge app.run(host=addrs[0]) if", "settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs =", "args = parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is None: logging.error(\"No such interface:", "request from util import startTunnel, stopTunnel, addressesForInterface from argparse import ArgumentParser import logging", "request from {}\".format(address)) startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200 def main(): global settings", "address.\".format(args.interface)) return settings['localIP'] = addrs[0] settings['bridge'] = args.bridge app.run(host=addrs[0]) if __name__ == '__main__':", "argparse import ArgumentParser import logging app = Flask(__name__) settings = {} 
@app.route(\"/connect\") def", "return \"Success\", 200 def main(): global settings parser = ArgumentParser() parser.add_argument(\"--bridge\", type=str) parser.add_argument(\"interface\",", "parser.add_argument(\"interface\", type=str) args = parser.parse_args() addrs = addressesForInterface(args.interface) if addrs is None: logging.error(\"No", "util import startTunnel, stopTunnel, addressesForInterface from argparse import ArgumentParser import logging app =", "from flask import request from util import startTunnel, stopTunnel, addressesForInterface from argparse import", "such interface: {}\".format(args.interface)) return if len(addrs) == 0: logging.error(\"Interface {} has no IP4", "from flask import Flask from flask import request from util import startTunnel, stopTunnel,", "addressesForInterface(args.interface) if addrs is None: logging.error(\"No such interface: {}\".format(args.interface)) return if len(addrs) ==", "startTunnel(address, settings['localIP'], settings['bridge']) return \"Success\", 200 def main(): global settings parser = ArgumentParser()", "#!/usr/bin/env python3 from flask import Flask from flask import request from util import", "python3 from flask import Flask from flask import request from util import startTunnel,", "import ArgumentParser import logging app = Flask(__name__) settings = {} @app.route(\"/connect\") def connect():", "= {} @app.route(\"/connect\") def connect(): address = request.remote_addr logging.info(\"Connect request from {}\".format(address)) startTunnel(address," ]
[ "= True) -> str: lines: list = [] lines.append(str(self.data)) for child in [self.left,", "data: Any): self.right = Node(data) def __str__(self, top: bool = True) -> str:", "data: Any, left=None, right=None): self.data: Any = data if left is not None:", "left = Node(left) if right is not None: right = Node(right) self.left =", "top: lines[index+1] += space_after_line else: lines.append(data) if top: lines[-1] += space_after_line for line_number", "range(1, len(lines) - 1): if len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number] +=", "top: bool = True) -> str: lines: list = [] lines.append(str(self.data)) for child", "True) -> str: lines: list = [] lines.append(str(self.data)) for child in [self.left, self.right]:", "data if left is not None: left = Node(left) if right is not", "+ 1]) > \\ len(lines[line_number]): lines[line_number] += \\ \" \" * (len(lines[line_number +", "len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number] += \\ \" \" * (len(lines[line_number", "lines: list = [] lines.append(str(self.data)) for child in [self.left, self.right]: if child is", "Node(left) if right is not None: right = Node(right) self.left = left self.right", "in [self.left, self.right]: if child is not None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")):", "right def set_left(self, data: Any): self.left = Node(data) def set_right(self, data: Any): self.right", "int((len(max(lines, key=len)) - len(str(self.data))) / 2) \\ + lines[0] return '\\n'.join(lines) def hasChildren(self)", "1): if len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number] += \\ \" \"", "import Any class Node: def __init__(self, data: Any, left=None, right=None): self.data: Any =", "- len(str(self.data))) / 2) \\ + lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool:", "len(lines) - 1): if len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number] += \\", "\" * int((len(max(lines, 
key=len)) - len(str(self.data))) / 2) \\ + lines[0] return '\\n'.join(lines)", "* index if len(lines)-1 > index: lines[index+1] += \" \" + data if", "is not None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line =", "is not None: right = Node(right) self.left = left self.right = right def", "- len(lines[line_number])) lines[0] = \\ \" \" * int((len(max(lines, key=len)) - len(str(self.data))) /", "for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \" \" *", "left=None, right=None): self.data: Any = data if left is not None: left =", "list = [] lines.append(str(self.data)) for child in [self.left, self.right]: if child is not", "+ 1]) - len(lines[line_number])) lines[0] = \\ \" \" * int((len(max(lines, key=len)) -", "right=None): self.data: Any = data if left is not None: left = Node(left)", "- 1): if len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number] += \\ \"", "\\ len(lines[line_number]): lines[line_number] += \\ \" \" * (len(lines[line_number + 1]) - len(lines[line_number]))", "= Node(left) if right is not None: right = Node(right) self.left = left", "+= space_after_line else: lines.append(data) if top: lines[-1] += space_after_line for line_number in range(1,", "Node: def __init__(self, data: Any, left=None, right=None): self.data: Any = data if left", "Any): self.right = Node(data) def __str__(self, top: bool = True) -> str: lines:", "\" \" + data if top: lines[index+1] += space_after_line else: lines.append(data) if top:", "if len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number] += \\ \" \" *", "None: right = Node(right) self.left = left self.right = right def set_left(self, data:", "is not None: left = Node(left) if right is not None: right =", "\" \" * index if len(lines)-1 > index: lines[index+1] += \" \" +", "[] lines.append(str(self.data)) for child in [self.left, self.right]: if child is not 
None: for", "lines[index+1] += \" \" + data if top: lines[index+1] += space_after_line else: lines.append(data)", "> index: lines[index+1] += \" \" + data if top: lines[index+1] += space_after_line", "index: lines[index+1] += \" \" + data if top: lines[index+1] += space_after_line else:", "index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \" \" * index", "> \\ len(lines[line_number]): lines[line_number] += \\ \" \" * (len(lines[line_number + 1]) -", "self.right = right def set_left(self, data: Any): self.left = Node(data) def set_right(self, data:", "typing import Any class Node: def __init__(self, data: Any, left=None, right=None): self.data: Any", "Node(data) def __str__(self, top: bool = True) -> str: lines: list = []", "def hasChildren(self) -> bool: return self.left is not None and self.right is not", "\" + data if top: lines[index+1] += space_after_line else: lines.append(data) if top: lines[-1]", "child in [self.left, self.right]: if child is not None: for index, data in", "= data if left is not None: left = Node(left) if right is", "right is not None: right = Node(right) self.left = left self.right = right", "self.left = Node(data) def set_right(self, data: Any): self.right = Node(data) def __str__(self, top:", "\" * index if len(lines)-1 > index: lines[index+1] += \" \" + data", "+= space_after_line for line_number in range(1, len(lines) - 1): if len(lines[line_number + 1])", "for child in [self.left, self.right]: if child is not None: for index, data", "def set_right(self, data: Any): self.right = Node(data) def __str__(self, top: bool = True)", "Node(data) def set_right(self, data: Any): self.right = Node(data) def __str__(self, top: bool =", "self.left = left self.right = right def set_left(self, data: Any): self.left = Node(data)", "in range(1, len(lines) - 1): if len(lines[line_number + 1]) > \\ len(lines[line_number]): lines[line_number]", "if child is not None: for index, data in 
enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data)", "Node(right) self.left = left self.right = right def set_left(self, data: Any): self.left =", "+ data if top: lines[index+1] += space_after_line else: lines.append(data) if top: lines[-1] +=", "= right def set_left(self, data: Any): self.left = Node(data) def set_right(self, data: Any):", "lines[0] = \\ \" \" * int((len(max(lines, key=len)) - len(str(self.data))) / 2) \\", "space_after_line = \" \" * index if len(lines)-1 > index: lines[index+1] += \"", "return '\\n'.join(lines) def hasChildren(self) -> bool: return self.left is not None and self.right", "None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \" \"", "not None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \"", "lines[index+1] += space_after_line else: lines.append(data) if top: lines[-1] += space_after_line for line_number in", "None: left = Node(left) if right is not None: right = Node(right) self.left", "from typing import Any class Node: def __init__(self, data: Any, left=None, right=None): self.data:", "data = str(data) space_after_line = \" \" * index if len(lines)-1 > index:", "hasChildren(self) -> bool: return self.left is not None and self.right is not None", "enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \" \" * index if len(lines)-1 >", "Any): self.left = Node(data) def set_right(self, data: Any): self.right = Node(data) def __str__(self,", "not None: left = Node(left) if right is not None: right = Node(right)", "class Node: def __init__(self, data: Any, left=None, right=None): self.data: Any = data if", "+ lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool: return self.left is not None", "[self.left, self.right]: if child is not None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data", "set_left(self, data: Any): 
self.left = Node(data) def set_right(self, data: Any): self.right = Node(data)", "Any class Node: def __init__(self, data: Any, left=None, right=None): self.data: Any = data", "set_right(self, data: Any): self.right = Node(data) def __str__(self, top: bool = True) ->", "space_after_line else: lines.append(data) if top: lines[-1] += space_after_line for line_number in range(1, len(lines)", "if left is not None: left = Node(left) if right is not None:", "* (len(lines[line_number + 1]) - len(lines[line_number])) lines[0] = \\ \" \" * int((len(max(lines,", "+= \" \" + data if top: lines[index+1] += space_after_line else: lines.append(data) if", "Any = data if left is not None: left = Node(left) if right", "data if top: lines[index+1] += space_after_line else: lines.append(data) if top: lines[-1] += space_after_line", "left is not None: left = Node(left) if right is not None: right", "lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool: return self.left is not None and", "* int((len(max(lines, key=len)) - len(str(self.data))) / 2) \\ + lines[0] return '\\n'.join(lines) def", "else: lines.append(data) if top: lines[-1] += space_after_line for line_number in range(1, len(lines) -", "if len(lines)-1 > index: lines[index+1] += \" \" + data if top: lines[index+1]", "space_after_line for line_number in range(1, len(lines) - 1): if len(lines[line_number + 1]) >", "<reponame>kethan1/Data-Structures<gh_stars>0 from typing import Any class Node: def __init__(self, data: Any, left=None, right=None):", "in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \" \" * index if len(lines)-1", "len(lines)-1 > index: lines[index+1] += \" \" + data if top: lines[index+1] +=", "(len(lines[line_number + 1]) - len(lines[line_number])) lines[0] = \\ \" \" * int((len(max(lines, key=len))", "len(str(self.data))) / 2) \\ + lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool: return", "right = Node(right) self.left = left self.right = 
right def set_left(self, data: Any):", "data: Any): self.left = Node(data) def set_right(self, data: Any): self.right = Node(data) def", "def __init__(self, data: Any, left=None, right=None): self.data: Any = data if left is", "if top: lines[-1] += space_after_line for line_number in range(1, len(lines) - 1): if", "-> str: lines: list = [] lines.append(str(self.data)) for child in [self.left, self.right]: if", "left self.right = right def set_left(self, data: Any): self.left = Node(data) def set_right(self,", "\\ \" \" * int((len(max(lines, key=len)) - len(str(self.data))) / 2) \\ + lines[0]", "for line_number in range(1, len(lines) - 1): if len(lines[line_number + 1]) > \\", "/ 2) \\ + lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool: return self.left", "= Node(right) self.left = left self.right = right def set_left(self, data: Any): self.left", "def __str__(self, top: bool = True) -> str: lines: list = [] lines.append(str(self.data))", "\" \" * int((len(max(lines, key=len)) - len(str(self.data))) / 2) \\ + lines[0] return", "lines.append(data) if top: lines[-1] += space_after_line for line_number in range(1, len(lines) - 1):", "\" \" * (len(lines[line_number + 1]) - len(lines[line_number])) lines[0] = \\ \" \"", "self.right = Node(data) def __str__(self, top: bool = True) -> str: lines: list", "= \\ \" \" * int((len(max(lines, key=len)) - len(str(self.data))) / 2) \\ +", "__str__(self, top: bool = True) -> str: lines: list = [] lines.append(str(self.data)) for", "bool = True) -> str: lines: list = [] lines.append(str(self.data)) for child in", "if right is not None: right = Node(right) self.left = left self.right =", "self.right]: if child is not None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data =", "'\\n'.join(lines) def hasChildren(self) -> bool: return self.left is not None and self.right is", "def set_left(self, data: Any): self.left = Node(data) def set_right(self, data: Any): self.right =", "+= \\ \" \" * 
(len(lines[line_number + 1]) - len(lines[line_number])) lines[0] = \\", "\\ \" \" * (len(lines[line_number + 1]) - len(lines[line_number])) lines[0] = \\ \"", "key=len)) - len(str(self.data))) / 2) \\ + lines[0] return '\\n'.join(lines) def hasChildren(self) ->", "top: lines[-1] += space_after_line for line_number in range(1, len(lines) - 1): if len(lines[line_number", "= \" \" * index if len(lines)-1 > index: lines[index+1] += \" \"", "= left self.right = right def set_left(self, data: Any): self.left = Node(data) def", "1]) - len(lines[line_number])) lines[0] = \\ \" \" * int((len(max(lines, key=len)) - len(str(self.data)))", "= Node(data) def __str__(self, top: bool = True) -> str: lines: list =", "data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line = \" \" * index if", "len(lines[line_number])) lines[0] = \\ \" \" * int((len(max(lines, key=len)) - len(str(self.data))) / 2)", "2) \\ + lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool: return self.left is", "lines[line_number] += \\ \" \" * (len(lines[line_number + 1]) - len(lines[line_number])) lines[0] =", "lines[-1] += space_after_line for line_number in range(1, len(lines) - 1): if len(lines[line_number +", "child is not None: for index, data in enumerate(child.__str__(top=False).split(\"\\n\")): data = str(data) space_after_line", "__init__(self, data: Any, left=None, right=None): self.data: Any = data if left is not", "line_number in range(1, len(lines) - 1): if len(lines[line_number + 1]) > \\ len(lines[line_number]):", "\" * (len(lines[line_number + 1]) - len(lines[line_number])) lines[0] = \\ \" \" *", "= str(data) space_after_line = \" \" * index if len(lines)-1 > index: lines[index+1]", "len(lines[line_number]): lines[line_number] += \\ \" \" * (len(lines[line_number + 1]) - len(lines[line_number])) lines[0]", "Any, left=None, right=None): self.data: Any = data if left is not None: left", "if top: lines[index+1] += space_after_line else: 
lines.append(data) if top: lines[-1] += space_after_line for", "not None: right = Node(right) self.left = left self.right = right def set_left(self,", "= [] lines.append(str(self.data)) for child in [self.left, self.right]: if child is not None:", "str: lines: list = [] lines.append(str(self.data)) for child in [self.left, self.right]: if child", "= Node(data) def set_right(self, data: Any): self.right = Node(data) def __str__(self, top: bool", "index if len(lines)-1 > index: lines[index+1] += \" \" + data if top:", "1]) > \\ len(lines[line_number]): lines[line_number] += \\ \" \" * (len(lines[line_number + 1])", "\\ + lines[0] return '\\n'.join(lines) def hasChildren(self) -> bool: return self.left is not", "str(data) space_after_line = \" \" * index if len(lines)-1 > index: lines[index+1] +=", "self.data: Any = data if left is not None: left = Node(left) if", "lines.append(str(self.data)) for child in [self.left, self.right]: if child is not None: for index," ]
[ "CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async", "\"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if not", "cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255),", "\"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\"", ") CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA", "string import ascii_letters, digits import esphome.config_validation as cv import esphome.codegen as cg from", "\"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\"", "CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component", "cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR):", "char in value: if char not in valid_chars: raise cv.Invalid( f\"Must only consist", "\"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = 
\"foreground_color\"", "> 29: raise cv.Invalid(\"Must be a string less than 29 characters\") for char", "cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID):", "CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component", "config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await", "from . import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\"", "CONF_BACKGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component", "+ arg, ) ) if CONF_BACKGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component))", "only consist of upper/lowercase characters, numbers and the period '.'. 
The character '{char}'", "characters\") for char in value: if char not in valid_chars: raise cv.Invalid( f\"Must", "in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component =", "{ cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema(", "\"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if not isinstance(value, str) or", "digits}.\" if not isinstance(value, str) or len(value) > 29: raise cv.Invalid(\"Must be a", "in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR in", "{ cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR):", "arg, ) ) if CONF_BACKGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if", "\"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\"", "import color from esphome.const import ( CONF_VISIBLE, ) from . 
import CONF_NEXTION_ID from", "\"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\"", ") ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } )", "= \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if not isinstance(value, str)", "CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config: cg.add(var.set_font_id(config[CONF_FONT_ID]))", "config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config: cg.add(var.set_font_id(config[CONF_FONT_ID])) if CONF_VISIBLE", "the period '.'. 
The character '{char}' cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA", "cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend(", "character '{char}' cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID):", "= await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component))", ") ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } )", "= CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA", "not isinstance(value, str) or len(value) > 29: raise cv.Invalid(\"Must be a string less", "char not in valid_chars: raise cv.Invalid( f\"Must only consist of upper/lowercase characters, numbers", "cv.Invalid( f\"Must only consist of upper/lowercase characters, numbers and the period '.'. 
The", "be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color),", "CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION", ") return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color),", "{ cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( {", "cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME", "def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if not isinstance(value, str) or len(value)", "as cg from esphome.components import color from esphome.const import ( CONF_VISIBLE, ) from", "\"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars", "cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend(", "CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR", "in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config: 
cg.add(var.set_font_id(config[CONF_FONT_ID])) if", "cg from esphome.components import color from esphome.const import ( CONF_VISIBLE, ) from .", "NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if not isinstance(value, str) or len(value) >", "less than 29 characters\") for char in value: if char not in valid_chars:", "config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR in config:", "await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if", "\"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\"", "= \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE =", "CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] +", "await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if", "} ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), }", "cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): 
cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } )", "} ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, }", "in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component =", "= \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value):", "CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA =", "= CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def", "import ( CONF_VISIBLE, ) from . import CONF_NEXTION_ID from . 
import Nextion CONF_VARIABLE_NAME", "cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema(", "CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE", "cv import esphome.codegen as cg from esphome.components import color from esphome.const import (", "cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def setup_component_core_(var, config,", "CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def setup_component_core_(var,", "= \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP =", "for char in value: if char not in valid_chars: raise cv.Invalid( f\"Must only", "\"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\"", "( CONF_VISIBLE, ) from . import CONF_NEXTION_ID from . 
import Nextion CONF_VARIABLE_NAME =", "return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE,", "max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName,", "if CONF_BACKGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config:", "cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema(", "{ cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def setup_component_core_(var, config, arg):", "} ) ) async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME]))", "valid_chars = f\"{ascii_letters + digits}.\" if not isinstance(value, str) or len(value) > 29:", "cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME):", ") CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } )", "= \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH =", "import ascii_letters, digits import esphome.config_validation as cv import esphome.codegen as cg from esphome.components", 
"CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH", "\"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters +", "= \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR =", "+ digits}.\" if not isinstance(value, str) or len(value) > 29: raise cv.Invalid(\"Must be", "= \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR =", "period '.'. The character '{char}' cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA =", "= \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH =", "than 29 characters\") for char in value: if char not in valid_chars: raise", "\"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\"", "config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name(", "of upper/lowercase characters, numbers and the period '.'. 
The character '{char}' cannot be", "arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME],", "def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config:", "from esphome.components import color from esphome.const import ( CONF_VISIBLE, ) from . import", "CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR", "esphome.codegen as cg from esphome.components import color from esphome.const import ( CONF_VISIBLE, )", "'.'. The character '{char}' cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema(", ") ) async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif", "cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR in config: color_component", "color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR])", "from . import CONF_NEXTION_ID from . 
import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME =", "\"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\"", "cv.Invalid(\"Must be a string less than 29 characters\") for char in value: if", "esphome.components import color from esphome.const import ( CONF_VISIBLE, ) from . import CONF_NEXTION_ID", "and the period '.'. The character '{char}' cannot be used.\" ) return value", "= \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID =", "The character '{char}' cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( {", "cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( {", "not in valid_chars: raise cv.Invalid( f\"Must only consist of upper/lowercase characters, numbers and", "cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR", "if CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config:", "cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( {", ") if CONF_BACKGROUND_COLOR in config: color_component = await 
cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in", "cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR", "if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config:", "str) or len(value) > 29: raise cv.Invalid(\"Must be a string less than 29", "characters, numbers and the period '.'. The character '{char}' cannot be used.\" )", "NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), }", "be a string less than 29 characters\") for char in value: if char", "cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in", "len(value) > 29: raise cv.Invalid(\"Must be a string less than 29 characters\") for", "await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config: cg.add(var.set_font_id(config[CONF_FONT_ID])) if CONF_VISIBLE in config: cg.add(var.set_visible(config[CONF_VISIBLE]))", "digits import esphome.config_validation as cv import esphome.codegen as cg from esphome.components import color", "if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME]", 
"= CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA =", "CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\"", "} ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } )", ") CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) )", "color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR])", "29 characters\") for char in value: if char not in valid_chars: raise cv.Invalid(", "= \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT =", "string less than 29 characters\") for char in value: if char not in", "CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean,", "config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR])", "numbers and the period '.'. 
The character '{char}' cannot be used.\" ) return", "= \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters", "CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def", "NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME):", "in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg,", "from esphome.const import ( CONF_VISIBLE, ) from . import CONF_NEXTION_ID from . import", "'{char}' cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion),", "value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True):", "= \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if", "import esphome.config_validation as cv import esphome.codegen as cg from esphome.components import color from", "= \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION =", ") ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) )", "cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR 
in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID", "cv.use_id(color), } ) ) async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config:", ") async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME", "cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255),", "\"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\"", "config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, )", "CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR", "from string import ascii_letters, digits import esphome.config_validation as cv import esphome.codegen as cg", "\"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\"", "default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0,", "CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT", "= \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE =", "used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { 
cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR):", "cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in", "cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in", "= await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config: cg.add(var.set_font_id(config[CONF_FONT_ID])) if CONF_VISIBLE in config:", "setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add(", "= CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend(", "value: if char not in valid_chars: raise cv.Invalid( f\"Must only consist of upper/lowercase", "CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID", "CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA =", "{ cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) 
CONFIG_TEXT_COMPONENT_SCHEMA", ") from . import CONF_NEXTION_ID from . import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME", "raise cv.Invalid(\"Must be a string less than 29 characters\") for char in value:", "cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID):", "await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if", "raise cv.Invalid( f\"Must only consist of upper/lowercase characters, numbers and the period '.'.", "color from esphome.const import ( CONF_VISIBLE, ) from . import CONF_NEXTION_ID from .", "} ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), }", "cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def setup_component_core_(var, config, arg): if", "CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR = \"foreground_pressed_color\" CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars =", "cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color), } ) ) async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in", "config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR in config: color_component = await", "color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component)) if CONF_FONT_ID in config: cg.add(var.set_font_id(config[CONF_FONT_ID])) if CONF_VISIBLE in", "if not 
isinstance(value, str) or len(value) > 29: raise cv.Invalid(\"Must be a string", "if char not in valid_chars: raise cv.Invalid( f\"Must only consist of upper/lowercase characters,", "= \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE =", "= cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, }", "CONF_NEXTION_ID from . import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID =", "CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema(", "29: raise cv.Invalid(\"Must be a string less than 29 characters\") for char in", "CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA", "max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color), cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color),", ". 
import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE", "valid_chars: raise cv.Invalid( f\"Must only consist of upper/lowercase characters, numbers and the period", "CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend(", "= await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR]) cg.add(var.set_foreground_pressed_color(color_component))", "CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL", "cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME):", "as cv import esphome.codegen as cg from esphome.components import color from esphome.const import", "cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, } ) CONFIG_TEXT_COMPONENT_SCHEMA =", ") CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) )", "= \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR =", "cv.use_id(color), cv.Optional(CONF_VISIBLE, default=True): cv.boolean, 
} ) CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName,", "in value: if char not in valid_chars: raise cv.Invalid( f\"Must only consist of", "\"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\"", "CONF_VISIBLE, ) from . import CONF_NEXTION_ID from . import Nextion CONF_VARIABLE_NAME = \"variable_name\"", "isinstance(value, str) or len(value) > 29: raise cv.Invalid(\"Must be a string less than", "ascii_letters, digits import esphome.config_validation as cv import esphome.codegen as cg from esphome.components import", "elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if", "= f\"{ascii_letters + digits}.\" if not isinstance(value, str) or len(value) > 29: raise", "a string less than 29 characters\") for char in value: if char not", "config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component = await", "esphome.config_validation as cv import esphome.codegen as cg from esphome.components import color from esphome.const", "f\"{ascii_letters + digits}.\" if not isinstance(value, str) or len(value) > 29: raise cv.Invalid(\"Must", "CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP", "cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_COMPONENT_NAME): NextionName,", "CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE", "= 
\"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP =", "or len(value) > 29: raise cv.Invalid(\"Must be a string less than 29 characters\")", "CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\" CONF_ON_SETUP", "esphome.const import ( CONF_VISIBLE, ) from . import CONF_NEXTION_ID from . import Nextion", "= await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component))", "Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\"", "cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color),", "CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE", "cannot be used.\" ) return value CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema( { cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion), cv.Optional(CONF_BACKGROUND_COLOR):", "cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in config: cg.add( var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) )", "CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( { cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) )", "\"wave_max_value\" CONF_PRECISION = \"precision\" 
CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\"", "CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR", "CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR", "import CONF_NEXTION_ID from . import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID", "in valid_chars: raise cv.Invalid( f\"Must only consist of upper/lowercase characters, numbers and the", "f\"Must only consist of upper/lowercase characters, numbers and the period '.'. The character", "CONF_FONT_ID = \"font_id\" def NextionName(value): valid_chars = f\"{ascii_letters + digits}.\" if not isinstance(value,", ") ) if CONF_BACKGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR", "CONF_ON_SETUP = \"on_setup\" CONF_TOUCH_SLEEP_TIMEOUT = \"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH", ". import CONF_NEXTION_ID from . 
import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\"", "color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR])", "\"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL = \"tft_url\" CONF_ON_SLEEP = \"on_sleep\" CONF_ON_WAKE = \"on_wake\"", "\"touch_sleep_timeout\" CONF_WAKE_UP_PAGE = \"wake_up_page\" CONF_AUTO_WAKE_ON_TOUCH = \"auto_wake_on_touch\" CONF_WAVE_MAX_LENGTH = \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\"", "import esphome.codegen as cg from esphome.components import color from esphome.const import ( CONF_VISIBLE,", "cv.Required(CONF_COMPONENT_NAME): NextionName, cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255), } ) ) CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend( cv.Schema( {", "async def setup_component_core_(var, config, arg): if CONF_VARIABLE_NAME in config: cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME])) elif CONF_COMPONENT_NAME in", "consist of upper/lowercase characters, numbers and the period '.'. The character '{char}' cannot", "upper/lowercase characters, numbers and the period '.'. 
The character '{char}' cannot be used.\"", "if CONF_FOREGROUND_COLOR in config: color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR]) cg.add(var.set_foreground_color(color_component)) if CONF_FOREGROUND_PRESSED_COLOR in config:", "= \"wave_channel_id\" CONF_WAVE_MAX_VALUE = \"wave_max_value\" CONF_PRECISION = \"precision\" CONF_WAVEFORM_SEND_LAST_VALUE = \"waveform_send_last_value\" CONF_TFT_URL =", "import Nextion CONF_VARIABLE_NAME = \"variable_name\" CONF_COMPONENT_NAME = \"component_name\" CONF_WAVE_CHANNEL_ID = \"wave_channel_id\" CONF_WAVE_MAX_VALUE =", "var.set_variable_name( config[CONF_COMPONENT_NAME], config[CONF_COMPONENT_NAME] + arg, ) ) if CONF_BACKGROUND_COLOR in config: color_component =", "config: color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR]) cg.add(var.set_background_color(color_component)) if CONF_BACKGROUND_PRESSED_COLOR in config: color_component = await", "NextionName, cv.Optional(CONF_VARIABLE_NAME): NextionName, } ) ) CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend( cv.Schema( { cv.Optional(CONF_FONT_ID): cv.int_range(min=0,", "in config: color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR]) cg.add(var.set_background_pressed_color(color_component)) if CONF_FOREGROUND_COLOR in config: color_component =", "= \"wave_max_length\" CONF_BACKGROUND_COLOR = \"background_color\" CONF_BACKGROUND_PRESSED_COLOR = \"background_pressed_color\" CONF_FOREGROUND_COLOR = \"foreground_color\" CONF_FOREGROUND_PRESSED_COLOR =" ]
[ "koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control = KiwoomOpenApiPlusQAxWidget() APIModulePath = control.GetAPIModulePath() print(APIModulePath)", "from koapy.compat.pyside2.QtWidgets import QApplication from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control =", "sys from koapy.compat.pyside2.QtWidgets import QApplication from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control", "import QApplication from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control = KiwoomOpenApiPlusQAxWidget() APIModulePath", "import sys from koapy.compat.pyside2.QtWidgets import QApplication from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv)", "from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control = KiwoomOpenApiPlusQAxWidget() APIModulePath = control.GetAPIModulePath()", "QApplication from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control = KiwoomOpenApiPlusQAxWidget() APIModulePath =", "koapy.compat.pyside2.QtWidgets import QApplication from koapy import KiwoomOpenApiPlusQAxWidget app = QApplication(sys.argv) control = KiwoomOpenApiPlusQAxWidget()" ]
[ "course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context,", "\"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url,", "ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools for", "url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def", "external tools for the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id)", "account sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return", "launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_course( tool_id, self._sis_id(course_sis_id, \"course\"))", "on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def", "get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools for the passed canvas course id.", "a sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_course( tool_id,", "\"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get", "canvas course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools = []", "external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def", "json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data)", "Update the external tool identified by external_tool_id with the passed json data. context", "def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id,", "return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def", "launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id)", "context, context_id, tool_id): \"\"\" Get a sessionless launch url for an external tool.", "from uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API class", "url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params):", "return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a", "\"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get", "course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data):", "url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url,", "# Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import", "external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id,", "self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def", "def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update the external tool identified by", "external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id,", "external_tool_id): \"\"\" Delete the external tool identified by external_tool_id. context is either COURSES_API", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True", "the external tool identified by external_tool_id with the passed json data. 
context is", "= {\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a", "class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools for the passed", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for data in", "account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update the external", "self, tool_id, course_sis_id): \"\"\" Get a sessionless launch url for an external tool.", "Return external tools for the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url =", "return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self,", "create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\"", "with the passed json data. context is either COURSES_API or ACCOUNTS_API. context_id is", "class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools", "return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data)", "\"\"\" Return external tools for given account sis id. 
\"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\"))", "course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format(", "import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return", "return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id,", "course_sis_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\"", "COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update", "tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\"", "context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url =", "url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True def _get_sessionless_launch_url(self,", "identified by external_tool_id with the passed json data. 
context is either COURSES_API or", "context_id, external_tool_id): \"\"\" Delete the external tool identified by external_tool_id. context is either", "external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id}", "\"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url,", "context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self,", "\"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless launch url for an", "delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\"", "external tools for the passed canvas course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id)", "\"\"\" Return external tools for given course sis id. 
\"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\"))", "body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id):", "Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API from", "self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id,", "tools for given account sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id,", "\"\"\" Delete the external tool identified by external_tool_id. context is either COURSES_API or", "account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for", "external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for given course", "for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return", "# SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses", "either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending on context", "identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. 
context_id is the course_id", "course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a sessionless launch url", "+ \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id, tool_id):", "self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self,", "account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response", "context, context_id, external_tool_id): \"\"\" Delete the external tool identified by external_tool_id. context is", "Return external tools for the passed canvas course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url =", "on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data)", "external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the external tool identified by", "\"\"\" Return external tools for the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url", "\"\"\" Return external tools for the passed canvas course id. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url", "return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless", "self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless launch url for", "json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\" Create an external tool using the", "course_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\"", "course_id or account_id, depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\"", "\"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data):", "def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless launch url for an external", "or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id)", "= context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self,", "external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the", "UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas 
from uw_canvas.accounts", "external tools for given account sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self,", "def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id):", "is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending on", "tool_id, course_sis_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch", "context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return", "self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a sessionless", "= context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True def _get_sessionless_launch_url(self, context,", "tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless launch url", "ACCOUNTS_API. context_id is the Canvas course_id or account_id, depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\"", "for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id(", "sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_course( tool_id, self._sis_id(course_sis_id,", "import Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass", "context, context_id, external_tool_id, json_data): \"\"\" Update the external tool identified by external_tool_id with", "= ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data)", "account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id,", "get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless launch url for an external tool.", "course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data)", "return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def", "json_data. context is either COURSES_API or ACCOUNTS_API. context_id is the Canvas course_id or", "uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception):", "launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id)", "Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class", "\"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\"", "the passed json data. context is either COURSES_API or ACCOUNTS_API. context_id is the", "get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for given account sis id. \"\"\" return", "course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for", "context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url =", "self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update the", "get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless launch url for an external tool.", "University of Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas from uw_canvas.accounts import", "for given course sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data):", "def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for given account sis id. \"\"\"", "sis_id): \"\"\" Return external tools for given course sis id. 
\"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id,", "return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for given course sis", "the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) +", "account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a sessionless launch url", "ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a sessionless launch", "\"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id,", "tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless launch", "COURSES_API or ACCOUNTS_API. context_id is the Canvas course_id or account_id, depending on context.", "external_tool_id) response = self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get", "of Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API", "= COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data)", "a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id,", "tool_id, course_id): \"\"\" Get a sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch", "for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id(", "json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\"", "depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data)", "get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a sessionless launch url for an external", "tool_id, account_sis_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch", "launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\"))", "sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external", "Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas", "external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id):", "is the Canvas course_id or account_id, depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url =", "given course sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return", "external tools for given course sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self,", "canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = []", "\"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def", "Return external tools for given course sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def", "self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the external tool", "json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id,", "COURSES_API or ACCOUNTS_API. 
context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy", "account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return", "\"\"\" Create an external tool using the passed json_data. context is either COURSES_API", "tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a sessionless launch url for", "a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API,", "the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools", "return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id,", "data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external", "ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id,", "passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools =", "by external_tool_id. context is either COURSES_API or ACCOUNTS_API. 
context_id is the course_id or", "json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API,", "data. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id,", "url = context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url, params) def", "id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for data", "from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas):", "self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for", "context_id is the Canvas course_id or account_id, depending on context. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url", "= context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return", "def _create_external_tool(self, context, context_id, json_data): \"\"\" Create an external tool using the passed", "def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools for the passed canvas account", "\"\"\" url = context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id,", "[] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\"", "passed json data. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id", "\"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def", "for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params =", "= context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return", "\"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools for", "a sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id)", "course_id, params={}): \"\"\" Return external tools for the passed canvas course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index", "external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self,", "def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools for the passed canvas course", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\"", "def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return", "is either COURSES_API or ACCOUNTS_API. context_id is the Canvas course_id or account_id, depending", "url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def", "self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools for the passed", "by external_tool_id with the passed json data. 
context is either COURSES_API or ACCOUNTS_API.", "course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id,", "account_id, json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\" Create an external tool using", "\"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True def", "context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending", "self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a sessionless launch", "tools for given course sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id,", "context_id, json_data): \"\"\" Create an external tool using the passed json_data. context is", "url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id):", "+ \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id,", "url = context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data):", "context_id, tool_id): \"\"\" Get a sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch", "\"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id)", "_update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update the external tool identified by external_tool_id", "for the passed canvas course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\"", "\"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a", "ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url", "def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a sessionless launch url for an", "def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for given course sis id. \"\"\"", "a sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API,", "data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\"", "account_sis_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\"", "account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the external tool identified", "def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a sessionless launch url for an", "body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self,", "external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id,", "Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url =", "tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return", "\"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return", "\"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url, params)", "\"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools for the passed canvas", "json_data): \"\"\" Create an external tool using the passed json_data. context is either", "external_tool_id with the passed json data. context is either COURSES_API or ACCOUNTS_API. context_id", "return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for given account sis", "sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id,", "sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id,", "params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for given", "tool_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\"", "get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a sessionless launch url for an external", "the passed canvas course id. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools", "account_id, depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\" return self._post_resource(url,", "ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return", "external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self,", "COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return", "context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def", "depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url,", "tool identified by external_tool_id with the passed json data. context is either COURSES_API", "self, tool_id, account_sis_id): \"\"\" Get a sessionless launch url for an external tool.", "uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\"", "or ACCOUNTS_API. 
context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\"", "course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context,", "either COURSES_API or ACCOUNTS_API. context_id is the Canvas course_id or account_id, depending on", "self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self,", "the external tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id", "self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless launch url for", "Delete the external tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API.", "external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return", "course sis id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API,", "_create_external_tool(self, context, context_id, json_data): \"\"\" Create an external tool using the passed json_data.", "[] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\"", "the Canvas course_id or account_id, depending on context. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id)", "sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) +", "self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for", "update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id,", "external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update the external tool", "ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools for the passed canvas", "def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return", "url = COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params):", "tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id):", "get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools for the passed canvas account id.", "delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API,", "Apache-2.0 from uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API", "def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless launch url for an external", "return True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a sessionless launch url", "an external tool using the passed json_data. context is either COURSES_API or ACCOUNTS_API.", "+ \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id,", "for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self,", "context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id,", "tools for the passed canvas course id. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) +", "is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id)", "2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas from", "\"\"\" Update the external tool identified by external_tool_id with the passed json data.", "Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_course(", "or account_id, depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) + \"/external_tools\" return", "self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id): \"\"\" Get a sessionless", "\"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id,", "uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def", "def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the external tool identified by external_tool_id.", "context is either COURSES_API or ACCOUNTS_API. context_id is the Canvas course_id or account_id,", "_delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the external tool identified by external_tool_id. context", "passed canvas course id. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools =", "external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for given course sis id.", "self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id,", "= self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a sessionless", "using the passed json_data. context is either COURSES_API or ACCOUNTS_API. context_id is the", "external tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id is", "\"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url", "context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id,", "Canvas course_id or account_id, depending on context. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create \"\"\" url = context.format(context_id) +", "json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\" Create an", "the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) +", "SPDX-License-Identifier: Apache-2.0 from uw_canvas import Canvas from uw_canvas.accounts import ACCOUNTS_API from uw_canvas.courses import", "_get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a sessionless launch url for an external", "or ACCOUNTS_API. context_id is the Canvas course_id or account_id, depending on context. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create", "json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self,", "in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools", "in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools", "on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response = self._delete_resource(url)", "Create an external tool using the passed json_data. context is either COURSES_API or", "url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params", "return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\" Create an external", "tools for the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) +", "an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self,", "tool using the passed json_data. context is either COURSES_API or ACCOUNTS_API. context_id is", "return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self,", "+ \"/external_tools\" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools", "context, context_id, json_data): \"\"\" Create an external tool using the passed json_data. context", "tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id is the", "an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id,", "Get a sessionless launch url for an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url(", "Return external tools for given account sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def", "account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\" Create", "given account sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\"", "sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id,", "external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id,", "for the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\"", "external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for given account", "course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id,", "from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}):", "get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for given course sis id. 
\"\"\" return", "= [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id):", "external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def", "context_id, external_tool_id, json_data): \"\"\" Update the external tool identified by external_tool_id with the", "+ \"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id):", "tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a sessionless launch url for", "external tool identified by external_tool_id with the passed json data. context is either", "\"/external_tools/sessionless_launch\" params = {\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\"", "external tool using the passed json_data. context is either COURSES_API or ACCOUNTS_API. context_id", "COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external", "or ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\"", "return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools for the", "params={}): \"\"\" Return external tools for the passed canvas account id. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\"", "return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless launch url", "COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a sessionless launch", "an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\" params = {\"id\":", "depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) response =", "params=params): external_tools.append(data) return external_tools def get_external_tools_in_course_by_sis_id(self, sis_id): \"\"\" Return external tools for given", "sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id,", "ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url", "for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return", "id. 
\"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}): \"\"\" Return external tools", "import ACCOUNTS_API from uw_canvas.courses import COURSES_API class ExternalToolsException(Exception): pass class ExternalTools(Canvas): def get_external_tools_in_account(self,", "pass class ExternalTools(Canvas): def get_external_tools_in_account(self, account_id, params={}): \"\"\" Return external tools for the", "create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data) def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API,", "external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data):", "tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( ACCOUNTS_API, account_id, tool_id) def get_sessionless_launch_url_from_account_sis_id( self, tool_id, account_sis_id):", "tool_id, account_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch", "{\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless", "launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" url = context.format(context_id) + \"/external_tools/sessionless_launch\"", "sis_id): \"\"\" Return external tools for given account sis id. 
\"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id,", "json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update the external tool identified", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = COURSES_API.format(course_id) + \"/external_tools\" external_tools = [] for data in", "True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a sessionless launch url for", "params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get a sessionless launch url for an", "params = {\"id\": tool_id} return self._get_resource(url, params) def get_sessionless_launch_url_from_account(self, tool_id, account_id): \"\"\" Get", "id. \"\"\" return self.get_external_tools_in_course(self._sis_id(sis_id, \"course\")) def create_external_tool_in_course(self, course_id, json_data): return self._create_external_tool(COURSES_API, course_id, json_data)", "or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id)", "for given account sis id. \"\"\" return self.get_external_tools_in_account(self._sis_id(sis_id, \"account\")) def get_external_tools_in_course(self, course_id, params={}):", "external_tool_id): return self._delete_external_tool(COURSES_API, course_id, external_tool_id) def delete_external_tool_in_account(self, account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id)", "account_id, external_tool_id): return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete", "the passed json_data. context is either COURSES_API or ACCOUNTS_API. 
context_id is the Canvas", "context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self, course_id, external_tool_id): return self._delete_external_tool(COURSES_API,", "def get_sessionless_launch_url_from_course_sis_id( self, tool_id, course_sis_id): \"\"\" Get a sessionless launch url for an", "= [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools def get_external_tools_in_account_by_sis_id(self, sis_id):", "json data. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or", "self._update_external_tool(COURSES_API, course_id, external_tool_id, json_data) def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id,", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get", "passed json_data. context is either COURSES_API or ACCOUNTS_API. 
context_id is the Canvas course_id", "external_tools def get_external_tools_in_account_by_sis_id(self, sis_id): \"\"\" Return external tools for given account sis id.", "https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format( external_tool_id) return self._put_resource(url, body=json_data) def delete_external_tool_in_course(self,", "json_data): \"\"\" Update the external tool identified by external_tool_id with the passed json", "response = self._delete_resource(url) return True def _get_sessionless_launch_url(self, context, context_id, tool_id): \"\"\" Get a", "external_tool_id, json_data): \"\"\" Update the external tool identified by external_tool_id with the passed", "course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id) + \"/external_tools/{}\".format(", "account_id): \"\"\" Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\"", "return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id, external_tool_id, json_data): \"\"\" Update", "context.format(context_id) + \"/external_tools\" return self._post_resource(url, body=json_data) def update_external_tool_in_course(self, course_id, external_tool_id, json_data): return self._update_external_tool(COURSES_API,", "id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\" url = ACCOUNTS_API.format(account_id) + \"/external_tools\" external_tools = [] for data", "params={}): \"\"\" Return external tools for the passed canvas course id. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index \"\"\"", "account_id, params={}): \"\"\" Return external tools for the passed canvas account id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index", "self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id, json_data): \"\"\" Create an external tool", "def create_external_tool_in_account(self, account_id, json_data): return self._create_external_tool(ACCOUNTS_API, account_id, json_data) def _create_external_tool(self, context, context_id, json_data):", "is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update \"\"\" url = context.format(context_id)", "return self._delete_external_tool(ACCOUNTS_API, account_id, external_tool_id) def _delete_external_tool(self, context, context_id, external_tool_id): \"\"\" Delete the external", "Get a sessionless launch url for an external tool. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self.get_sessionless_launch_url_from_account(", "self.get_sessionless_launch_url_from_account( tool_id, self._sis_id(account_sis_id, \"account\")) def get_sessionless_launch_url_from_course(self, tool_id, course_id): \"\"\" Get a sessionless launch", "def update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context,", "update_external_tool_in_account(self, account_id, external_tool_id, json_data): return self._update_external_tool(ACCOUNTS_API, account_id, external_tool_id, json_data) def _update_external_tool(self, context, context_id,", "an external tool. 
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch \"\"\" return self._get_sessionless_launch_url( COURSES_API, course_id, tool_id) def get_sessionless_launch_url_from_course_sis_id( self," ]
[ "+ m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t]", "of subsequent consecutive time periods that the unit is required to be on.", "== 0: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t", "pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts", "standard ramp-down limit minus shutdown ramp limit plus maximum generator output - this", "ramp-down limit minus shutdown ramp limit plus maximum generator output - this is", "for g in m.GeneratorsAtBus[b]) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,", "m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\ -1 *", "def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods,", "t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t]", "for n in m.TimePeriods if n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st):", "unit is shut down in # this interval, it must remain off-line until", "constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart", "m.GlobalLoadGenerateMismatch[t] def 
enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g", "= 0 (degenerate upper bound) # (0, 1) - unit switching on: RHS", "in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] *", "from functools import partial import logging from pyomo.environ import * logger = logging.getLogger(__file__)", "was off in the previous time period but on in this one =>", "range(1, t) ) else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i", "== m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet) def", "rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b)", "* (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g])", "* (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split", "return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and n", "* ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) ) # compute startup", "Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule =", "Constraint.Skip else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def", "t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + 
m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def", "the mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] ==", "t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])", "conditions mentioned in the above note if t == 0: # Not 100%", "status # (0, 0) - unit staying off: RHS = 0 (degenerate upper", "b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at each node (S) # bus", "it must remain on-line until the end of the time span. if t", "Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c)", "\".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] -", "if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] *", "compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]):", "(1, 1) - unit staying on: RHS = maximum generator output (degenerate upper", "t-1]) else: # handle the final (MinimumUpTime[g] - 1) time periods - if", "m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] ==", "- (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if n >=", "in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st]", "else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\ 
m.NominalRampUpLimit[g] * m.UnitOn[g, t-1]", "negative parts of the mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] -", "for i in range(1, t) ) else: return m.HotStart[g, t] <= sum( m.UnitOn[g,", "posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def", "if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators)", "return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t):", "t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t):", "(m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This version fixes the problem with ignoring initial", "m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods,", "ramp limit minus startup ramp limit plus maximum power output (degenerate upper bound", "def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods,", "return Constraint.Skip else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t])", "has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for", "and negative parts of the 
mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t]", "if the unit was off in the previous time period but on in", "periods that the unit is required to be on. if t == 0:", "(0, 1) - unit switching on: RHS = startup ramp limit # (1,", "constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g,", "power output (degenerate upper bound due to unit off) # (1, 1) -", "m.TimePeriods if n >= t) >= 0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g,", "<= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t])", "= generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule) def objective_function(model): model.TotalCostObjective = Objective(rule=total_cost_objective_rule,", "+ \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t):", "t] - m.UnitOn[g, t+1]) #This version fixes the problem with ignoring initial conditions", "is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if", "else: return Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) *", "per-generator, per-time period shutdown costs. 
def compute_shutdown_costs_rule(m, g, t): if t == 0:", "Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet,", "unit switching on: RHS = standard ramp-down limit minus shutdown ramp limit plus", "(m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for", "b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] *", "(degenerate upper bound) # (0, 1) - unit switching on: RHS = maximum", "return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l])", "the EnforceUpTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1):", "l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l, t]", "m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st])", "t] - m.UnitOn[g, t-1]) else: # handle the final (MinimumUpTime[g] - 1) time", "= Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule =", "(m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else:", "the strangest case. 
# (1, 0) - unit switching off: RHS = shutdown", "return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] +", "ramp limit # (1, 0) - unit switching off: RHS = standard ramp", "time horizons are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for", "required to be on. if t == 0: return sum((1 - m.UnitOn[g, n])", "staying off: RHS = maximum generator output (degenerate upper bound) # (0, 1)", "m.TimePeriods if n >= t and n <= (t + value(m.MinimumDownTime[g]) - 1))", "until the end of the time span. if t == 0: # can", "return Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b,", "# (1, 1) - unit staying on: RHS = standard ramp limit plus", "model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False,", "m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] +", "- m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\ -1", "but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems #", "has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements =", "+ sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g", "m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b, t] constraint", "minus startup ramp limit plus maximum power output (degenerate upper bound due to", "bound) #NOTE: As expressed in 
Carrion-Arroyo and subsequently here, this constraint does NOT", "0: return Constraint.Skip return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if", "time period if t == 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\", "#NOTE: As expressed in Carrion-Arroyo and subsequently here, this constraint does NOT consider", "this constraint does NOT consider ramp down from initial conditions to t=1! #if", "t] else: return Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i])", "m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g]", "ramp down from initial conditions to t=1! #if t == value(m.NumTimePeriods): # return", "t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g,", "= Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators,", "\\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n", "- sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint =", "0.0 # constraint due to initial conditions. 
def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) ==", "time t (S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b])", "= Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if", "s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators", "time period after that not involving the initial condition. @simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g,", "for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for", "StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for", "has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True,", "m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\", "constraint + m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t]", "generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems # (1, 0) -", "model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule)", "== constraint return constraint # give meaning to the positive and negative parts", 
"Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance =", "+ m.LoadGenerateMismatch[b,t] == 0 return constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint", "return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ #", "n in m.TimePeriods if n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return", "on. if t == 0: return sum((1 - m.UnitOn[g, n]) for n in", "- (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g]", ") + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])", "= startup ramp limit # (1, 0) - unit switching off: RHS =", "0: # Not 100% sure of this one since there is no MaximumPowerAvailableT0", "t] == 0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps):", "each node (S) # bus b, time t (S) constraint = m.NetPowerInjectionAtBus[b, t]", "\\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t]", "(degenerate upper bound) # (0, 1) - unit switching on: RHS = standard", "interval, it must remain off-line until the end of the time span. 
if", "rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False):", "\\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] *", "4 cases, split by (t-1, t) unit status (RHS is defined as the", "t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the right-hand side terms below", "t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g,", ">= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for", "in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each time", "the delta from m.PowerGenerated[g, t-1]) # (0, 0) - unit staying off: RHS", "* m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) +", "return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t]", "m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g,", "n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if n", "and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g]", "constraint causes problems # (1, 0) - unit switching off: RHS = shutdown", "constraint # give meaning to the positive and negative parts of the mismatch", "m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False):", "m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m,", "- 
m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods", "m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g,", "t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g])", "g, t): if t == 0: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g]", "t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if n >= t) >=", "m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,", "* (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n in m.TimePeriods", "(sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in m.GenerationTimeInStage[st])", "be on. if t == 0: return sum(m.UnitOn[g, n] for n in m.TimePeriods", "t): if t == 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] -", "def enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial", "has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage) if", "Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the right-hand side", "m.UnitOn[g, t-1])) # compute the per-generator, per-time period shutdown costs. 
def compute_shutdown_costs_rule(m, g,", "ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if", "be physically true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint", "m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for t in m.GenerationTimeInStage[st]) +", "m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g,", "in the previous time period but on in this one => # the", "t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return", "Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods,", "rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule)", "that the unit is required to be on. 
if t == 0: return", "g, t): # 4 cases, split by (t-1, t) unit status (RHS is", "(1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\", "for st in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators)", "version fixes the problem with ignoring initial conditions mentioned in the above note", "return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g,", "return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g]", "= constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves is True: constraint", "g, t): # 4 cases, split by (t-1, t) unit status: # (0,", "rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher =", "model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates", "t == 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g]", "not involving the initial condition. 
@simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t <=", "= Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance", "= m.NetPowerInjectionAtBus[b, t] == constraint return constraint # give meaning to the positive", "t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split by (t-1, t) unit", "\\ + m.LoadGenerateMismatch[b,t] == 0 return constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False):", "m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] *", "t): # 4 cases, split by (t, t+1) unit status # (0, 0)", "unit staying off: RHS = 0 (degenerate upper bound) # (0, 1) -", "has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower", "if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else: return", "constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage is True: constraint =", "m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1", "return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\", "(t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])", "* m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if has_storage is True: constraint =", "- 1)) >= \\ 
m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: #", "m.TimePeriods if n >= t and n <= (t + value(m.MinimumUpTime[g]) - 1))", "== sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses)) def line_power_rule(m,", "is shut down in # this interval, it must remain off-line until the", "else: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ -", "period after that not involving the initial condition. @simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t):", "(t-1, t) unit status: # (0, 0) - unit staying off: RHS =", "constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves is True: constraint =", "+ \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t):", "on: RHS = maximum generator output minus shutdown ramp limit (degenerate upper bound)", "0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] +", "t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <=", "def enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split by (t-1, t) unit status:", "small time horizons are specified return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] -", "standard ramp limit minus startup ramp limit plus maximum power output (degenerate upper", "t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n", "m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t", "t] * (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t): # 4 cases,", "for n in m.NondispatchableGenerators) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,t]", ">= 0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at each", 
"m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m,", "* logger = logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t]", "if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True:", "== 0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return", "by (t, t+1) unit status # (0, 0) - unit staying off: RHS", "plus maximum power output (degenerate upper bound due to unit off) # (1,", "== 0: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\", "* (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g, n]) for n", "- m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and n", "time horizons are specified return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g,", "model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t,", "logging from pyomo.environ import * logger = logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t,", "has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if has_non_dispatchable_generators is", "m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g,", "- m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g]", "ramp 
limit # (1, 1) - unit staying on: RHS = standard ramp-down", "= global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance =", "m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4", "= Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not", "if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint. return Constraint.Skip elif", "# this interval, it must remain off-line until the end of the time", "return sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule,", "This may never be physically true, but if a generator has ShutdownRampLimit >>", "time periods that the unit is required to be on. if t ==", "- m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return", "has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model,", "EnforceUpTimeConstraintInitial constraint. 
return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): #", "s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t]", "t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b", "t-1]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g,", "t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else:", "- m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) def", "previous time period if t == 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] +", "on-line until the end of the time span. if t == 0: #", "(degenerate upper bound due to unit off) # (1, 1) - unit staying", "m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g,", "constraint = constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s,", "staying off: RHS = maximum generator output (degenerate upper bound due to unit", "<= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each time period after that not", "compute_shutdown_costs_rule(m, g, t): if t == 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] *", "t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for", "# (0, 0) - unit staying off: RHS = maximum generator output (degenerate", "np from functools import partial import logging from pyomo.environ import * logger =", "NOT consider ramp down from initial conditions to t=1! 
#if t == value(m.NumTimePeriods):", "* m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g,", "fixes the problem with ignoring initial conditions mentioned in the above note if", "1) - unit staying on: RHS = maximum generator output (degenerate upper bound)", "- m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] -", "constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC =", "True: constraint = constraint - m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t]", "t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t])", "(MinimumUpTime[g] - 1) time periods - if a unit is started up in", "= Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet,", "in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0", "m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return constraint def net_power_at_bus_rule(m, b, t, has_storage=False,", "* m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1])", "m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model,", "return constraint # give meaning to the positive and negative parts of the", "causes 
problems # (1, 0) - unit switching off: RHS = shutdown ramp", "m.NondispatchableGenerators) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in", "# Not 100% sure of this one since there is no MaximumPowerAvailableT0 return", "off: RHS = maximum generator output (degenerate upper bound due to unit being", "import * logger = logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus],", "def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20)", "else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m,", "the minimum number of subsequent consecutive time periods that the unit is required", "minus shutdown ramp limit (degenerate upper bound) - this is the strangest case.", "g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t):", "m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g,", "m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g,", "= 1 m.HotStart[g, t].fixed = True return Constraint.Skip else: return m.HotStart[g, t] <=", "t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each", "# (1, 0) - unit switching off: RHS = shutdown ramp limit #", "( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] *", "MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] *", "when small time horizons are specified return sum(((1 - 
m.UnitOn[g, n]) - (m.UnitOnT0[g]", "= Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule) def objective_function(model):", "to be on. if t == 0: return sum(m.UnitOn[g, n] for n in", "- m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if", "= True return Constraint.Skip else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for", "model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost", "m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g,", "constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t]", "\\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t] >=", "in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t", "m.MinimumUpTime[g]) + 1): # the right-hand side terms below are only positive if", "unit staying off: RHS = maximum generator output (degenerate upper bound due to", "* (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g,", "expressed in Carrion-Arroyo and subsequently here, this constraint does NOT consider ramp down", "is required to be on. 
if t == 0: return sum((1 - m.UnitOn[g,", "partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True,", "specified return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n", "t] * (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases,", "0: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and", "in piecewise linearization of the cost function. return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g]", "m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for", "# this interval, it must remain on-line until the end of the time", "initial conditions mentioned in the above note if t == 0: # Not", "- 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g,", "unit is started up in # this interval, it must remain on-line until", "= partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True:", "As expressed in Carrion-Arroyo and subsequently here, this constraint does NOT consider ramp", "* m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if has_storage is", "t-1] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t]", "Angle constraint skipped for line between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip", "(m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\ -1 * ( 
m.MaximumPowerOutput[g, t] *", "m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each time period", "condition. @simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by", "(1 - m.UnitOn[g, t-1]) ) # compute startup costs for each generator, for", "t, x): # a function for use in piecewise linearization of the cost", "t and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] *", "if n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t]", "return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t]", "each generator, for each time period def compute_hot_start_rule(m, g, t): if t <=", "m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t]", "m.TimePeriods)) >= 0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at", "minimum number of subsequent consecutive time periods that the unit is required to", "0) - unit switching off: RHS = shutdown ramp limit # (1, 1)", "else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(t -", "previous time period but on in this one => # the value is", "def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses,", "logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def", 
"has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators,", "has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators,", "if a unit is shut down in # this interval, it must remain", ">= 0.0 # constraint due to initial conditions. def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g])", "n >= t and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\", "t] - m.UnitOn[g, t-1])) # compute the per-generator, per-time period shutdown costs. def", "case. 
# (1, 0) - unit switching off: RHS = shutdown ramp limit", "- if a unit is started up in # this interval, it must", "if t == 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g,", "standard ramp limit plus power generated in previous time period if t ==", "\\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t): #", "the final (MinimumDownTime[g] - 1) time periods - if a unit is shut", "model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators,", "= Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods,", "global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses,", "* (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, t]", "it must remain off-line until the end of the time span. 
if t", "constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves is True:", "== 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t]", "\\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return", "return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint", "- m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0", "<= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] -", "== 0: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n", "give meaning to the positive and negative parts of the mismatch def posneg_rule(m,", "1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1 -", "- m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >=", "np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m,", "cases, split by (t-1, t) unit status (RHS is defined as the delta", "sum(m.ProductionCost[g, t] for g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty *", "m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if n", "t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip", "- unit switching off: RHS = standard ramp limit minus startup ramp limit", "due to unit being off) # (0, 1) - unit switching on: RHS", "= 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l,", "this is the strangest case. 
# (1, 0) - unit switching off: RHS", "- m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if", "enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split by (t-1, t) unit status (RHS", "t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint. return Constraint.Skip elif t", "# m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This version fixes the problem", "= Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule", "0.0 else: return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t]))", "- m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b in", "m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t]", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods,", "t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1", "rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF =", "m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t]", "for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in 
m.GenerationTimeInStage[st]))", "b in enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l] == 99999999: logger.debug(\" Line", "* (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\", "\\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g,", "== 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g]", "rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule", "constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if has_non_dispatchable_generators is True: constraint", "sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) ==", "for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st):", "when small time horizons are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] -", "t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint. 
return Constraint.Skip", "return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for t in m.GenerationTimeInStage[st])", "Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): # a function for use", "m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses,", "t] for g in m.GeneratorsAtBus[b]) if has_storage is True: constraint = constraint +", "-1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1 * (", "+ \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) ) else:", "m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split by (t-1, t)", "m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g", "return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses))", "m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t] ==", "of the mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t]", "m.GeneratorsAtBus[b]) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s, t] for s", "{} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] == m.B[l] *", "span. 
if t == 0: # can happen when small time horizons are", "only positive if the unit was off in the previous time period but", "- (m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute the per-generator, per-time period shutdown", "# (0, 0) - unit staying off: RHS = 0 (degenerate upper bound)", "sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s in", "- m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, t] * (1", "model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost =", "model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost", "model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart =", "# (1, 0) - unit switching off: RHS = standard ramp limit minus", "# 4 cases, split by (t-1, t) unit status: # (0, 0) -", "def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators", "skipped for line between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return", "mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b,", "enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split by (t, t+1) unit status #", "- m.UnitOn[g, t-1]) def 
enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split by (t-1,", "m.B[l] == 99999999: logger.debug(\" Line Power Angle constraint skipped for line between {}", "never be physically true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this", "+ \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t):", "final (MinimumUpTime[g] - 1) time periods - if a unit is started up", "m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint return constraint # give meaning", "<= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] *", "a function for use in piecewise linearization of the cost function. return m.TimePeriodLength", "upper bound due to unit being off) # (0, 1) - unit switching", "g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g,", "== 0: return Constraint.Skip return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods", "enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l] == 99999999: logger.debug(\" Line Power Angle", "None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule,", "n in m.NondispatchableGenerators) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for", "m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] -", "sum(m.UnitOn[g, t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 #", "m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l],", "0: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ -", 
"slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and", ") + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) )", "and n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g,", "m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <=", "output minus shutdown ramp limit (degenerate upper bound) - this is the strangest", "m.TimePeriods if n >= t) >= 0.0 else: return sum(((1 - m.UnitOn[g, n])", "constraint for each time period after that not involving the initial condition. @simple_constraint_rule", "t] for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b])", "0) - unit staying off: RHS = maximum generator output (degenerate upper bound", "g, t): # 4 cases, split by (t, t+1) unit status # (0,", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods,", "on in this one => # the value is the minimum number of", "pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def power_balance(m, b,", "from initial conditions to t=1! 
#if t == value(m.NumTimePeriods): # return Constraint.Skip #else:", "for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t]", "t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) else:", "m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) +", "def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st]", "for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for", "final (MinimumDownTime[g] - 1) time periods - if a unit is shut down", "has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements", "(1, 0) - unit switching off: RHS = shutdown ramp limit # (1,", "off: RHS = maximum generator output (degenerate upper bound) # (0, 1) -", "in enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l] == 99999999: logger.debug(\" Line Power", "the unit is required to be on. 
if t == 0: return sum((1", ">= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0", "m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g] -", "t] * (1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t]", "bound due to unit off) # (1, 1) - unit staying on: RHS", "import partial import logging from pyomo.environ import * logger = logging.getLogger(__file__) eps =", "else: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >=", "model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF = ptdf", "side terms below are only positive if the unit was off in the", "* m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1]", "t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint", "+ sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b])", "\\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <=", "- m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) #", "(degenerate upper bound) #NOTE: As expressed in Carrion-Arroyo and subsequently here, this constraint", "right-hand side terms below are only positive if the unit was off in", "m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] *", "\\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: # handle the final", "* m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def", "if t == 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] *", "interval, it must remain on-line until the end of the time span. 
if", "RHS = maximum generator output minus shutdown ramp limit (degenerate upper bound) -", "= constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint return constraint", "limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1", "m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(1, t) ) else:", "i in range(1, t) ) else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i]", "standard ramp-down limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <=", "rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits =", "= standard ramp limit plus power generated in previous time period if t", "Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule)", "(m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute the per-generator, per-time period shutdown costs.", "value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed = True return Constraint.Skip else: return", "sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if has_non_dispatchable_generators is True: constraint = constraint", "t].fixed = True return Constraint.Skip else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i]", "problems # (1, 0) - unit switching off: RHS = shutdown ramp limit", "bound due to unit being off) # (0, 1) - unit switching on:", "m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0 def", "enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in 
m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def", "the previous time period but on in this one => # the value", "value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each time period after that not involving", "period after that not involving the initial condition. @simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t):", "- m.MinimumDownTime[g]) + 1): # the right-hand side terms below are only positive", "t=1! #if t == value(m.NumTimePeriods): # return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t]", "= partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False,", "model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule) def", "def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule", "in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus =", "* m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated,", "\\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in", "m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ # 
m.ShutdownRampLimit[g]", "+ 1): # the right-hand side terms below are only positive if the", "-1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \\", "4 cases, split by (t, t+1) unit status # (0, 0) - unit", "<= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the right-hand side terms below are", "m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split by (t, t+1)", "t] for g in m.Generators) if has_non_dispatchable_generators is True: constraint = constraint +", "m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators", "sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return", "in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m):", "rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule =", "staying on: RHS = standard ramp-down limit if t == 0: return m.PowerGeneratedT0[g]", "rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial", "True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, 
rule=fn_enforce_reserve_requirements) if has_regulating_reserves", "== m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for", "- sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return constraint", "if m.B[l] == 99999999: logger.debug(\" Line Power Angle constraint skipped for line between", "t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return", "t] for g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\", "for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g,", "t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def", "m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t]", "+ sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] constraint", "#else: # return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] +", "t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g,", "posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance =", "warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts =", "limit plus maximum power output 
(degenerate upper bound due to unit off) #", "l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b", "m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g,", "return Constraint.Skip return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if t", "logger.debug(\" Line Power Angle constraint skipped for line between {} and {} \".format(m.BusFrom[l],", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators,", "enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1 - m.UnitOn[g, t])", "<= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint. 
return Constraint.Skip elif t <=", "slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand", "constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g", "Constraint.Skip return sum(m.UnitOn[g, t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) ==", "def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance", "(1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g]", "m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB',", "- m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g,", "m.UnitOn[g, t-1]) else: # handle the final (MinimumUpTime[g] - 1) time periods -", "bus b, time t (S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): # a function for", "m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses)) def", 
"enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g, t] for t", "def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split by (t, t+1) unit status", "limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g]", "m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if", "the EnforceDownTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1):", "m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4", "ramp limit plus maximum generator output - this is the strangest case #NOTE:", "t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for", "Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): #", "( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] -", "MaximumPowerOutput, this constraint causes problems # (1, 0) - unit switching off: RHS", "t-1]) # (0, 0) - unit staying off: RHS = maximum generator output", "limit minus startup ramp limit plus maximum power output (degenerate upper bound due", "- m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) def", "n] for n in m.TimePeriods if n >= t and n <= (t", "Line Power Angle constraint skipped for line between {} and {} \".format(m.BusFrom[l], m.BusTo[l]))", "\\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g,", "m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - 
m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m,", ">= t) >= 0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g,", "== 0: # can happen when small time horizons are specified return sum(((1", "return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t]", "split by (t-1, t) unit status (RHS is defined as the delta from", "True return Constraint.Skip else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i", "for use in piecewise linearization of the cost function. return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x]", "def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m,", "shut down in # this interval, it must remain off-line until the end", "is True: constraint = constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\", "m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This version fixes the problem with", "n >= t) >= 0.0 else: return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g,", "(1, 1) - unit staying on: RHS = standard ramp-down limit if t", "= Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule)", "unit switching on: RHS = maximum generator output minus shutdown ramp limit (degenerate", "+ value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])", "+ m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses", "each time period after that not involving the initial condition. 
@simple_constraint_rule def enforce_up_time_constraints_subsequent(m,", "t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g,", "t) >= 0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1]))", "for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return constraint def net_power_at_bus_rule(m,", "Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t]", "limit plus maximum generator output - this is the strangest case #NOTE: This", "# constraint for each time period after that not involving the initial condition.", "- m.UnitOn[g, t-1]) ) # compute startup costs for each generator, for each", "- 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: #", "shutdown ramp limit (degenerate upper bound) - this is the strangest case. #", "positive if the unit was off in the previous time period but on", "in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for", "m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1", "(1, 1) - unit staying on: RHS = standard ramp limit plus power", "m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 -", "to t=1! #if t == value(m.NumTimePeriods): # return Constraint.Skip #else: # return m.MaximumPowerAvailable[g,", "sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves is True: constraint = constraint -", "def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def", "if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint. 
return Constraint.Skip elif", "function. return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators *", "if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1 *", "else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods,", "- m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] )", "m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if has_storage is True: constraint = constraint", "1): # the right-hand side terms below are only positive if the unit", "def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split by (t-1, t) unit status", "= constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage is True: constraint", "== 0.0 # constraint for each time period after that not involving the", "in this one => # the value is the minimum number of subsequent", "- m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m,", "g in m.Generators) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for", "@simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the", "t] for b in m.Buses for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] +", "- this is the strangest case. 
# (1, 0) - unit switching off:", "m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return", "m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): # 4", "#if t == value(m.NumTimePeriods): # return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <=", "in m.TimePeriods if n >= t and n <= (t + value(m.MinimumUpTime[g]) -", "for line between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t]", ") + \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) )", "on. if t == 0: return sum(m.UnitOn[g, n] for n in m.TimePeriods if", "b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def power_balance(m, b, t,", "True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage is", "is True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage", "- unit switching on: RHS = startup ramp limit # (1, 0) -", "t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t):", "in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] ==", "model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators,", "n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t]", "t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint. 
return Constraint.Skip", "(t, t+1) unit status # (0, 0) - unit staying off: RHS =", "in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in", "constraint = constraint + m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b, t] constraint =", "must remain off-line until the end of the time span. if t ==", "for g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b,", "def line_power_rule(m, l, t): if m.B[l] == 99999999: logger.debug(\" Line Power Angle constraint", "#NOTE: This may never be physically true, but if a generator has ShutdownRampLimit", "def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at each node (S)", "RHS = maximum generator output (degenerate upper bound) # (0, 1) - unit", "+ \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t]", "(m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t)", "constraint return constraint # give meaning to the positive and negative parts of", "m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0 else:", "== m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in", "(value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the right-hand side terms below are only", "m.NetPowerInjectionAtBus[b, t] == constraint return constraint # give meaning to the positive and", "constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance)", "unit is required to be on. 
if t == 0: return sum(m.UnitOn[g, n]", "Not 100% sure of this one since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g]", "model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus)", "unit switching off: RHS = shutdown ramp limit # (1, 1) - unit", "# (1, 1) - unit staying on: RHS = standard ramp-down limit if", "m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g,", "return constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t])", "m.PowerGenerated[g, t-1]) # (0, 0) - unit staying off: RHS = maximum generator", "m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if", "has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements", "for s in m.Storage) if has_global_reserves is True: constraint = constraint - m.ReserveRequirement[t]", "for i, b in enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l] == 99999999:", "<= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases,", "- m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return", "shutdown costs. 
def compute_shutdown_costs_rule(m, g, t): if t == 0: return m.ShutdownCost[g, t]", "m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet)", "0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return", "return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): #", "off) # (0, 1) - unit switching on: RHS = startup ramp limit", "sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in", "# (0, 1) - unit switching on: RHS = standard ramp-down limit minus", "rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch =", ">= 0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for", "t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each", "m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each time period", "sure of this one since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\", "= constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint +", "sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] constraint =", "for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods))", "- unit staying on: RHS = standard ramp-down limit if t == 0:", 
"rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent", "def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g,", "if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods,", "Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def", "(sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t]", "(m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g, n]) for n in", "t) ) else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in", "= Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost =", "m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) ==", "n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] +", "on: RHS = standard 
ramp-down limit if t == 0: return m.PowerGeneratedT0[g] -", "rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA", ">= t and n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g]", "# Power balance at each node (S) # bus b, time t (S)", ">= 0.0 else: return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g,", "def compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <=", "one since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] *", "t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each time period after that", "Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators,", "that not involving the initial condition. 
@simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t", "enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split by (t-1, t) unit status: #", "- (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g]))", "def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l, t): if", "Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule", "has_storage=False, has_non_dispatchable_generators=False): # Power balance at each node (S) # bus b, time", "logger = logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] ==", "is the strangest case. # (1, 0) - unit switching off: RHS =", "* (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): # 4 cases,", "are only positive if the unit was off in the previous time period", "n in m.TimePeriods if n >= t and n <= (t + value(m.MinimumDownTime[g])", "m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for", "= Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage,", "constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods,", "this 
interval, it must remain on-line until the end of the time span.", "model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is", "* m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] -", "1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: # handle", "\\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else:", "constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t):", "1) - unit staying on: RHS = standard ramp limit plus power generated", "l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return constraint def net_power_at_bus_rule(m, b,", "constraint due to initial conditions. def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return", "m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if has_storage", "model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods,", "m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators)) def", "startup ramp limit plus maximum power output (degenerate upper bound due to unit", "m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in", "- unit staying on: RHS = standard ramp limit plus power generated in", "* m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\", "periods - if a 
unit is started up in # this interval, it", "# can happen when small time horizons are specified return sum(((1 - m.UnitOn[g,", "fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is", "return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g,", "b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >=", "def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False,", "shutdown ramp limit # (1, 1) - unit staying on: RHS = standard", "i, b in enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l] == 99999999: logger.debug(\"", "# handle the final (MinimumUpTime[g] - 1) time periods - if a unit", "t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split by", "<= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,", "period shutdown costs. 
def compute_shutdown_costs_rule(m, g, t): if t == 0: return m.ShutdownCost[g,", "else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def", "has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if", "m.MinimumDownTime[g]) + 1): # the right-hand side terms below are only positive if", "t-1] - m.UnitOn[g, t]) else: # handle the final (MinimumDownTime[g] - 1) time", "in range(1, t) ) else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for", "# (0, 1) - unit switching on: RHS = maximum generator output minus", "m.UnitOn[g, t-1]) ) # compute startup costs for each generator, for each time", "sum( m.UnitOn[g, i] for i in range(1, t) ) else: return m.HotStart[g, t]", "positive and negative parts of the mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b,", "m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n in", "* (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This version fixes the problem with ignoring", "<= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint. 
return Constraint.Skip elif t <=", "is True: constraint = constraint - m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] +", "def enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split by (t-1, t) unit status:", "0) - unit switching off: RHS = standard ramp limit minus startup ramp", "n >= t) >= 0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] -", "in # this interval, it must remain off-line until the end of the", "t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each time period after that", "1) - unit switching on: RHS = standard ramp-down limit minus shutdown ramp", "m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g,", "* m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1]", "n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1]", "0: # can happen when small time horizons are specified return sum(((1 -", "ramp-down limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\", "m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] * (1", "l, t): if m.B[l] == 99999999: logger.debug(\" Line Power Angle constraint skipped for", "rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits =", "* (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g,", "sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods if n", "m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g,", 
"m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1 -", "return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] *", "m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t): # 4", "== 99999999: logger.debug(\" Line Power Angle constraint skipped for line between {} and", "\\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1", "== 0 return constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1", "t] <= sum( m.UnitOn[g, i] for i in range(1, t) ) else: return", "\\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] *", "model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators,", "4 cases, split by (t-1, t) unit status: # (0, 0) - unit", "off: RHS = standard ramp limit minus startup ramp limit plus maximum power", "= Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule)", "switching on: RHS = startup ramp limit # (1, 0) - unit switching", "conditions to t=1! 
#if t == value(m.NumTimePeriods): # return Constraint.Skip #else: # return", "meaning to the positive and negative parts of the mismatch def posneg_rule(m, b,", "# compute startup costs for each generator, for each time period def compute_hot_start_rule(m,", "def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m,", "t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at each node (S) # bus b,", "m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l]", "conditions. def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g, t]", "-1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g,", "model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch =", "rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines,", "m.UnitOn[g, t]) else: # handle the final (MinimumDownTime[g] - 1) time periods -", "handle the final (MinimumDownTime[g] - 1) time periods - if a unit is", "t == 0: # Not 100% sure of this one since there is", "down from initial conditions to t=1! 
#if t == value(m.NumTimePeriods): # return Constraint.Skip", "unit staying on: RHS = maximum generator output (degenerate upper bound) #NOTE: As", "return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in", "unit staying on: RHS = standard ramp limit plus power generated in previous", "above note if t == 0: # Not 100% sure of this one", "Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the right-hand side", "maximum generator output (degenerate upper bound) #NOTE: As expressed in Carrion-Arroyo and subsequently", "rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost =", "sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if", "1) - unit switching on: RHS = maximum generator output minus shutdown ramp", "= standard ramp-down limit minus shutdown ramp limit plus maximum generator output -", "m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0", "else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(1, t)", "<= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the right-hand side terms below are", "b in m.Buses for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for", "+ \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\", "import numpy as np from functools import partial import logging from pyomo.environ import", "t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else:", "m.LoadGenerateMismatch[b,t] == 0 
return constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint =", "= Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF = ptdf model.CalculateLinePower", "> eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m, l,", "l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l, t]", "the above note if t == 0: # Not 100% sure of this", "on: RHS = startup ramp limit # (1, 0) - unit switching off:", "t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g]", "+ \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) )", "in the above note if t == 0: # Not 100% sure of", "(RHS is defined as the delta from m.PowerGenerated[g, t-1]) # (0, 0) -", "to unit being off) # (0, 1) - unit switching on: RHS =", "m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t): if t == 0: return m.StartupCost[g,", "after that not involving the initial condition. 
@simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if", "in m.GeneratorsAtBus[b]) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s, t] for", "+ \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This version fixes", "else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t]", "= partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None,", "(0, 1) - unit switching on: RHS = maximum generator output minus shutdown", "subsequent consecutive time periods that the unit is required to be on. if", "* (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0:", "- (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t)", "t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t]", "m.UnitOn[g, i] for i in range(1, t) ) else: return m.HotStart[g, t] <=", "if n >= t) >= 0.0 else: return sum(((1 - m.UnitOn[g, n]) -", "model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines,", "t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return", "the initial condition. 
@simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): #", "specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods", "strangest case. # (1, 0) - unit switching off: RHS = shutdown ramp", "(m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n] for n in m.TimePeriods if", "is required to be on. if t == 0: return sum(m.UnitOn[g, n] for", "t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint = constraint +", "# return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] *", "in m.TimePeriods if n >= t) >= 0.0 else: return sum((m.UnitOn[g, n] -", "g in m.GeneratorsAtBus[b]) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s, t]", "ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems # (1, 0) - unit switching", "in m.Buses for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t", "if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s, t] for s in", "if t == 0: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods", "constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint return constraint #", "st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t", "-1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\ -1", "<filename>psst/model/constraints.py import numpy as np from functools import partial import logging from pyomo.environ", "return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m,", "(m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: # handle the final (MinimumUpTime[g] - 1)", "model.EnforceReserveRequirements = Constraint(model.TimePeriods, 
rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator)", "minus shutdown ramp limit plus maximum generator output - this is the strangest", "i] for i in range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t):", "t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed = True", "model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial)", "t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] constraint = constraint", "- (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods if n >= t)", "being off) # (0, 1) - unit switching on: RHS = startup ramp", "t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >=", ">= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t]", "model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent)", "- (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if n >=", ">= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1", "# constraint due to initial conditions. 
def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0:", "if n >= t and n <= (t + value(m.MinimumDownTime[g]) - 1)) >=", "0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] *", "<= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g,", "constraint = m.NetPowerInjectionAtBus[b, t] == constraint return constraint # give meaning to the", "if t == 0: # can happen when small time horizons are specified", "t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g,", "on: RHS = maximum generator output (degenerate upper bound) #NOTE: As expressed in", "t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint. return Constraint.Skip elif t", "sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and", "from m.PowerGenerated[g, t-1]) # (0, 0) - unit staying off: RHS = maximum", "m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g,", "commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule)", "True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a)", "sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage is True: constraint = constraint +", "t == value(m.NumTimePeriods): # return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <= \\", "in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] 
constraint = constraint - m.Demand[b, t]", "* (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: # handle the final (MinimumDownTime[g] -", ">> MaximumPowerOutput, this constraint causes problems # (1, 0) - unit switching off:", "m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in", "limit # (1, 0) - unit switching off: RHS = standard ramp limit", "switching on: RHS = standard ramp-down limit minus shutdown ramp limit plus maximum", "ramp-down limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\", "def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1 - m.UnitOn[g,", "m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint =", "(t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g,", "def production_cost_function(m, g, t, x): # a function for use in piecewise linearization", "m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1])", "t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if", "100% sure of this one since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <=", "return Constraint.Skip return sum(m.UnitOn[g, t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g]))", "m.UnitOnT0[g])) for n in m.TimePeriods if n >= t) >= 0.0 else: return", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) 
model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent)", "balance at each node (S) # bus b, time t (S) constraint =", "Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule)", "(m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g,", "total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule =", "(t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g,", "- m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute the per-generator, per-time", "Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators,", "neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b):", "sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return constraint def", "+ sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage is True: constraint = constraint", "Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, 
rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule)", "output (degenerate upper bound due to unit off) # (1, 1) - unit", "t): if t == 0: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] -", "def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g in", "t): # 4 cases, split by (t-1, t) unit status: # (0, 0)", "m.TimePeriods if n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] ==", "model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model):", "return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods", "rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators,", "for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st]", "np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m,", ">= t) >= 0.0 else: return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1]", "unit being off) # (0, 1) - unit switching on: RHS = startup", "- value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed = True return", "s in m.Storage) if 
has_global_reserves is True: constraint = constraint - m.ReserveRequirement[t] constraint", "* m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses)) def line_power_rule(m, l, t): if", "t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split by (t, t+1) unit", "t]) def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses) def", "= constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t]", "- if a unit is shut down in # this interval, it must", "= Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB =", "value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return", "(1, 0) - unit switching off: RHS = standard ramp limit minus startup", "eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t):", "constraint skipped for line between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else:", "numpy as np from functools import partial import logging from pyomo.environ import *", "\\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): #", "are specified return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for", "by (t-1, t) unit status (RHS is defined as the delta from m.PowerGenerated[g,", "# m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] -", "model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, 
rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model):", "== sum(m.ProductionCost[g, t] for g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty", "is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements =", "in m.NondispatchableGenerators) if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for s", "n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if n >=", "t-1]) def enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split by (t-1, t) unit", "= Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x):", "value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods", "+ sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves is True: constraint = constraint", "\\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])", "m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for", "t == 0: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if", "unit status (RHS is defined as the delta from m.PowerGenerated[g, t-1]) # (0,", "cost function. 
return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators", "m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\", "g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g, t] for t in", "rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch", "(m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else:", "sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses)) def line_power_rule(m, l,", "t-1] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g,", "value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1 - m.UnitOn[g, t]) for t in", "g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g,", "for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] constraint = constraint -", "t] <= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g]", "def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for", "else: return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\", "else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g]", "range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t): if t == 0:", "= maximum generator output (degenerate upper 
bound) # (0, 1) - unit switching", "t-1])) for n in m.TimePeriods if n >= t) >= 0.0 # constraint", "def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for", "return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g,", "model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM", "m.UnitOn[g, t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 #", "m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] *", "i] for i in range(1, t) ) else: return m.HotStart[g, t] <= sum(", "1 m.HotStart[g, t].fixed = True return Constraint.Skip else: return m.HotStart[g, t] <= sum(", "* (1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] +", "m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This", "output (degenerate upper bound) #NOTE: As expressed in Carrion-Arroyo and subsequently here, this", "generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule) def objective_function(model): model.TotalCostObjective = Objective(rule=total_cost_objective_rule, sense=minimize)", "sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in", "has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b])", "model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, 
has_zonal_reserves=False): if has_global_reserves is True:", "rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule) def objective_function(model): model.TotalCostObjective =", "- m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) else: return", "True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves is", "off) # (1, 1) - unit staying on: RHS = standard ramp limit", "n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods if n >=", "RHS = standard ramp-down limit minus shutdown ramp limit plus maximum generator output", "0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in", "t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint return constraint # give meaning to", "if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return", "# the value is the minimum number of subsequent consecutive time periods that", "1) - unit switching on: RHS = startup ramp limit # (1, 0)", "the time span. if t == 0: # can happen when small time", "1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l, t):", "condition. 
@simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by", "== sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t", "if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g in", "Constraint.Skip return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if t <=", "(value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the right-hand side terms below are only", "rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule)", "return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(1, t) )", "calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b):", "with ignoring initial conditions mentioned in the above note if t == 0:", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods,", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods,", "= logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0", "return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n", "1) time 
periods - if a unit is started up in # this", "== 0: return Constraint.Skip return sum(m.UnitOn[g, t] for t in m.TimePeriods if t", "Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def", "(degenerate upper bound) - this is the strangest case. # (1, 0) -", "cases, split by (t, t+1) unit status # (0, 0) - unit staying", "use in piecewise linearization of the cost function. return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] *", "i in range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t): if t", "def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g, t] for", "def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def", "strangest case #NOTE: This may never be physically true, but if a generator", "if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each time period after", "t] for i, b in enumerate(m.Buses)) def line_power_rule(m, l, t): if m.B[l] ==", "for each time period after that not involving the initial condition. 
@simple_constraint_rule def", "* m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function,", "-1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) ) # compute", "m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g,", "horizons are specified return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t]))", "t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1]", "+ value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1])", "* m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators))", "t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if", "m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t]", "- m.UnitOnT0[g])) for n in m.TimePeriods if n >= t) >= 0.0 else:", "constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t]", "= Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods,", "handled by the EnforceUpTimeConstraintInitial constraint. 
return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g])", "m.Buses) def neg_load_generate_mismatch_tolerance_rule(m, b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def", "constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) if has_storage is True:", "enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint.", "= Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent", "def compute_startup_costs_rule_minusM(m, g, t): if t == 0: return m.StartupCost[g, t] >= m.ColdStartCost[g]", "return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1", "- unit staying off: RHS = maximum generator output (degenerate upper bound due", "m.TimePeriods if n >= t) >= 0.0 # constraint due to initial conditions.", "> eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l,", "small time horizons are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g]))", "status (RHS is defined as the delta from m.PowerGenerated[g, t-1]) # (0, 0)", "g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1 - m.UnitOn[g, t]) for", "\\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] -", "True: constraint = constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\ -", "- m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\ 
m.MaximumPowerOutput[g, t] * m.UnitOn[g, t]", "model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods,", "if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0 # constraint for each time period after", "up in # this interval, it must remain on-line until the end of", "for each generator, for each time period def compute_hot_start_rule(m, g, t): if t", "m.UnitOn[g, t-1])) for n in m.TimePeriods if n >= t) >= 0.0 #", "Power balance at each node (S) # bus b, time t (S) constraint", "of the cost function. return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts", "- unit switching on: RHS = standard ramp-down limit minus shutdown ramp limit", "unit is required to be on. if t == 0: return sum((1 -", "t])) for n in m.TimePeriods if n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m,", "= Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial =", "value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else:", "n >= t and n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \\", "upper bound) - this is the strangest case. 
# (1, 0) - unit", "m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g,", "t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators)) def generation_in_stage_st_cost_rule(m,", "in m.TimePeriods)) >= 0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance", "m.UnitOn[g, t+1]) #This version fixes the problem with ignoring initial conditions mentioned in", "0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\", "enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m,", "has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves)", "in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods))", "m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g,", "m.HotStart[g, t].fixed = True return Constraint.Skip else: return m.HotStart[g, t] <= sum( m.UnitOn[g,", "\\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) ) else: return", "(1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\", "m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return", "1) time periods - if a unit is shut down in # this", "output (degenerate upper bound) # (0, 1) - unit switching on: RHS =", 
"m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] +", "t] == constraint return constraint # give meaning to the positive and negative", "- m.UnitOn[g, t-1])) for n in m.TimePeriods if n >= t) >= 0.0", "- m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t]", "has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t]", "+ \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) +", "t-1])) # compute the per-generator, per-time period shutdown costs. def compute_shutdown_costs_rule(m, g, t):", "between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] == m.B[l]", ") else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\ -1 * (", "handle the final (MinimumUpTime[g] - 1) time periods - if a unit is", "1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: # handle", "in m.Storage) if has_global_reserves is True: constraint = constraint - m.ReserveRequirement[t] constraint =", "Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower", "= commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule =", "in previous time period if t == 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g]", "(m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, 
t]", "t-1]) ) # compute startup costs for each generator, for each time period", "t]) def enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split by (t-1, t) unit", "+ \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] -", "model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance,", "generator, for each time period def compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]):", "else: return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for", "t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t]", "if n >= t) >= 0.0 # constraint due to initial conditions. def", ">= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: # handle the", "= standard ramp limit minus startup ramp limit plus maximum power output (degenerate", "model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower =", "m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t]", "return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g,", "sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\", "if n >= t and n <= (t + value(m.MinimumUpTime[g]) - 1)) >=", "function for use in piecewise linearization of the cost function. return m.TimePeriodLength *", "remain off-line until the end of the time span. 
if t == 0:", "== 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else:", "in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each time", "t): # 4 cases, split by (t-1, t) unit status (RHS is defined", "<= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] *", "- this is the strangest case #NOTE: This may never be physically true,", "constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule,", "commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for", "is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods,", "at each node (S) # bus b, time t (S) constraint = m.NetPowerInjectionAtBus[b,", "(t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g])", "if t == 0: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n", "value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed = True return Constraint.Skip", "if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed =", "return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the right-hand", "period def compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g])", "- 1) time periods - if a unit is started up in #", "of the time span. 
if t == 0: # can happen when small", "return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g],", "in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] +", "t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1]", "t == 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])", "for b in m.Buses for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t]", "return m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m, l, t): return", "bound) - this is the strangest case. # (1, 0) - unit switching", "subsequently here, this constraint does NOT consider ramp down from initial conditions to", "t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g,", "maximum generator output (degenerate upper bound due to unit being off) # (0,", "@simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the", "Power Angle constraint skipped for line between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return", "no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g]", "- unit staying off: RHS = 0 (degenerate upper bound) # (0, 1)", "m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] *", "pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods,", "- 1)) >= \\ 
m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1", "constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule)", "def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >=", "calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz,", "def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t]", "(1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split by", "and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip def", "= maximum generator output (degenerate upper bound due to unit being off) #", "model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m,", "by the EnforceUpTimeConstraintInitial constraint. 
return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) +", "0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n", "rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): # a function for use in piecewise", "not None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule =", "t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\", "= maximum generator output (degenerate upper bound) #NOTE: As expressed in Carrion-Arroyo and", "m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t]", "- m.UnitOn[g, t-1])) # compute the per-generator, per-time period shutdown costs. def compute_shutdown_costs_rule(m,", "t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint", "and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l],", "(S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ -", "eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m,", "t] * (1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1]", "- m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split by (t-1,", "return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def power_balance(m, b, t, has_storage=False,", "* (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split", "t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])", "t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g,", "rule=fn_power_balance) def 
constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements", "Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves", "m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 -", "model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial", "m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g, n]) for", "and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g,", "m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <=", "return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in", "return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return", "g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint. 
return", "- m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t): if t == 0: return", "+ \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] *", "sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint = constraint", "generator output minus shutdown ramp limit (degenerate upper bound) - this is the", "involving the initial condition. @simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]):", "return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t]", "t] * (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <=", "+ sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s", "eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m, l, t):", "\\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) ) #", "0: # can happen when small time horizons are specified return sum((m.UnitOn[g, n]", "m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m,", "t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t,", "g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return m.MaximumPowerAvailable[g,t]", "t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g,", "model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, 
f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators,", "f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM)", "(t-1, t) unit status (RHS is defined as the delta from m.PowerGenerated[g, t-1])", "is the minimum number of subsequent consecutive time periods that the unit is", "model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule) model.ComputeStartupCostsMinusM =", "- m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint return constraint # give", "else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in", "m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) >", "m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])", "i]) * m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses)) def line_power_rule(m, l, t):", "sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] ==", "* (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t] * (1 -", "period but on in this one => # the value is the minimum", "partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, 
rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule)", "time period after that not involving the initial condition. @simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g,", "switching off: RHS = standard ramp limit minus startup ramp limit plus maximum", "constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance =", ">= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g])", "limit # (1, 1) - unit staying on: RHS = standard ramp-down limit", "for g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t]", "in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m,", "model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher", "plus power generated in previous time period if t == 0: return m.MaximumPowerAvailable[g,", "t-1] - m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, t] *", "initial condition. 
@simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled", "fix_first_angle_rule(m,t, slack_bus=1): return m.Angle[m.Buses[slack_bus], t] == 0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l]", "model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial)", "def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC", "<= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] -", "t and n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] *", "* \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for t", "as np from functools import partial import logging from pyomo.environ import * logger", "0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def", "- m.UnitOn[g, t+1]) #This version fixes the problem with ignoring initial conditions mentioned", "return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if", "limit (degenerate upper bound) - this is the strangest case. 
# (1, 0)", "g, t): if t == 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g]", "upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l,", "rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m,", "lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l,", "compute_startup_costs_rule_minusM(m, g, t): if t == 0: return m.StartupCost[g, t] >= m.ColdStartCost[g] -", "m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t]", "enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators)", "m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >=", "- m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] *", "m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split", "shutdown ramp limit # (1, 1) - unit staying on: RHS = maximum", "to initial conditions. 
def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return", "for n in m.TimePeriods if n >= t) >= 0.0 else: return sum((m.UnitOn[g,", "g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint + m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b,", "for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ +", "has_non_dispatchable_generators=False): # Power balance at each node (S) # bus b, time t", "- m.UnitOn[g, t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0", "def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t]", "g in m.Generators)) def generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g", "t) >= 0.0 else: return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] -", "can happen when small time horizons are specified return sum(((1 - m.UnitOn[g, n])", ">= m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] ==", "t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1])", "* ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g]", "# 4 cases, split by (t-1, t) unit status (RHS is defined as", "bound) # (0, 1) - unit switching on: RHS = maximum generator output", "0 return constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 -", "def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <=", "m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] -", "t) >= 0.0 # constraint due to initial conditions. 
def enforce_down_time_constraints_initial(m, g): if", "sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage,", "\\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): #", "consider ramp down from initial conditions to t=1! #if t == value(m.NumTimePeriods): #", "upper bound) #NOTE: As expressed in Carrion-Arroyo and subsequently here, this constraint does", "Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None:", "net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b]", "is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\", "if t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] =", "m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\", "and subsequently here, this constraint does NOT consider ramp down from initial conditions", "partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator", "RHS = shutdown ramp limit # (1, 1) - unit staying on: RHS", "m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == 
sum(float(m.PTDF[l,", "* (1 - m.UnitOn[g, t-1]) ) # compute startup costs for each generator,", "\\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g, n])", "t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g,", "t in m.TimePeriods)) >= 0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power", "( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \\ -1 *", "this is the strangest case #NOTE: This may never be physically true, but", "# handled by the EnforceUpTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods -", "node (S) # bus b, time t (S) constraint = m.NetPowerInjectionAtBus[b, t] +", "# give meaning to the positive and negative parts of the mismatch def", "99999999: logger.debug(\" Line Power Angle constraint skipped for line between {} and {}", "t] * (1 - m.UnitOn[g, t-1]) ) # compute startup costs for each", "time periods - if a unit is started up in # this interval,", "plus maximum generator output - this is the strangest case #NOTE: This may", "<= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g,", "- m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split by (t,", "m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g]", "time period but on in this one => # the value is the", "for g in m.Generators) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t]", "startup ramp limit # (1, 0) - unit switching off: RHS = standard", "generator output (degenerate upper bound) #NOTE: As expressed in Carrion-Arroyo and subsequently here,", "t+1] + \\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This 
version", "if t == 0: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g,", "must remain on-line until the end of the time span. if t ==", "the problem with ignoring initial conditions mentioned in the above note if t", "if ptdf is not None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule)", "(m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] -", "the unit was off in the previous time period but on in this", "t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g,", "number of subsequent consecutive time periods that the unit is required to be", "model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates", "== (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st]) +", "<= m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and", "m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and n <=", "# compute the per-generator, per-time period shutdown costs. def compute_shutdown_costs_rule(m, g, t): if", "each time period after that not involving the initial condition. 
@simple_constraint_rule def enforce_down_time_constraints_subsequent(m,", "for t in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t", "compute startup costs for each generator, for each time period def compute_hot_start_rule(m, g,", "model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand =", "def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements =", "model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): # a function", "split by (t-1, t) unit status: # (0, 0) - unit staying off:", "m.Buses for t in m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in", "# a function for use in piecewise linearization of the cost function. 
return", ">= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t]", "m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] -", "rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance", "( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) ) # compute startup costs", "= Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods,", "* model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20) model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule)", "+ \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] *", "t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g,", "value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else:", "the final (MinimumUpTime[g] - 1) time periods - if a unit is started", "m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip", "end of the time span. 
if t == 0: # can happen when", "################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines,", ") def compute_startup_costs_rule_minusM(m, g, t): if t == 0: return m.StartupCost[g, t] >=", "unit staying off: RHS = maximum generator output (degenerate upper bound) # (0,", "0) - unit staying off: RHS = 0 (degenerate upper bound) # (0,", "rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is", "m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g):", "by (t-1, t) unit status: # (0, 0) - unit staying off: RHS", "after that not involving the initial condition. 
@simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if", "standard ramp-down limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >=", "t) ) def compute_startup_costs_rule_minusM(m, g, t): if t == 0: return m.StartupCost[g, t]", "n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if n", "staying off: RHS = 0 (degenerate upper bound) # (0, 1) - unit", "cases, split by (t-1, t) unit status: # (0, 0) - unit staying", ") # compute startup costs for each generator, for each time period def", "this interval, it must remain off-line until the end of the time span.", "# 4 cases, split by (t, t+1) unit status # (0, 0) -", "m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost,", "l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t]", "is defined as the delta from m.PowerGenerated[g, t-1]) # (0, 0) - unit", "on: RHS = standard ramp limit plus power generated in previous time period", "handled by the EnforceDownTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g])", "does NOT consider ramp down from initial conditions to t=1! 
#if t ==", "this one since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t]", "Carrion-Arroyo and subsequently here, this constraint does NOT consider ramp down from initial", "(m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1]))", "<= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1]", "t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False):", "model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent =", "initial conditions. 
def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g,", "== m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def", "if t == 0: # Not 100% sure of this one since there", "can happen when small time horizons are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g,", "model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule =", "return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t):", "m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) ) # compute startup costs for", "model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF", "t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >=", "model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if", "for i in range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t): if", "switching off: RHS = shutdown ramp limit # (1, 1) - unit staying", "RHS = maximum generator output (degenerate upper bound due to unit being off)", "sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m,", "t (S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\", "if 
value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1 - m.UnitOn[g, t]) for t", "time span. if t == 0: # can happen when small time horizons", "generated in previous time period if t == 0: return m.MaximumPowerAvailable[g, t] <=", "partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model):", "this constraint causes problems # (1, 0) - unit switching off: RHS =", "unit was off in the previous time period but on in this one", "defined as the delta from m.PowerGenerated[g, t-1]) # (0, 0) - unit staying", "if n >= t) >= 0.0 else: return sum((m.UnitOn[g, n] - (m.UnitOn[g, t]", "\\ - sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True: constraint", "m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g,", "constraint = constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint return", "Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule)", "0 (degenerate upper bound) # (0, 1) - unit switching on: RHS =", "enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m,", "period if t == 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g]", "generator output (degenerate upper bound) # (0, 1) - unit switching on: RHS", "(0, 0) - unit staying off: 
RHS = maximum generator output (degenerate upper", "return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return", "= m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for", "limit # (1, 1) - unit staying on: RHS = maximum generator output", "m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] == constraint", "constraint = constraint - m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return", "maximum power output (degenerate upper bound due to unit off) # (1, 1)", "model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses,", "return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return", "return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the right-hand", "m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] ==", "- m.UnitOn[g, t]) else: # handle the final (MinimumDownTime[g] - 1) time periods", "in range(t - m.ColdStartHours[g], t) ) def compute_startup_costs_rule_minusM(m, g, t): if t ==", "* ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\ -1 *", "has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, 
has_regulating_reserves=True, has_zonal_reserves=False):", "t): if t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t]", "RHS = standard ramp-down limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g,", "value is the minimum number of subsequent consecutive time periods that the unit", "x): # a function for use in piecewise linearization of the cost function.", "(m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >=", "unit off) # (1, 1) - unit staying on: RHS = standard ramp", "return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >= t", "staying on: RHS = standard ramp limit plus power generated in previous time", "t == 0: # can happen when small time horizons are specified return", "unit switching off: RHS = standard ramp limit minus startup ramp limit plus", ">= t) >= 0.0 # constraint due to initial conditions. def enforce_down_time_constraints_initial(m, g):", "b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m,", "has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators,", "is the strangest case #NOTE: This may never be physically true, but if", "\\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: # handle the final", "the per-generator, per-time period shutdown costs. 
def compute_shutdown_costs_rule(m, g, t): if t ==", "from pyomo.environ import * logger = logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1):", "Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule", "== 0: return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] +", "* (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g,", "due to initial conditions. def enforce_down_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip", "1) - unit staying on: RHS = standard ramp-down limit if t ==", "== 0: # can happen when small time horizons are specified return sum((m.UnitOn[g,", "down in # this interval, it must remain off-line until the end of", "t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the right-hand side terms below", "enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split by (t-1, t) unit status: #", "m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is True:", "t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in m.GenerationTimeInStage[st]) +", "compute the per-generator, per-time period shutdown costs. def compute_shutdown_costs_rule(m, g, t): if t", "else: # handle the final (MinimumDownTime[g] - 1) time periods - if a", "due to unit off) # (1, 1) - unit staying on: RHS =", "costs. 
def compute_shutdown_costs_rule(m, g, t): if t == 0: return m.ShutdownCost[g, t] >=", "model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods,", "g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint. return", "t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <=", "m.UnitOn[g, t]) ) + \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 -", "t) unit status: # (0, 0) - unit staying off: RHS = maximum", "- unit switching on: RHS = maximum generator output minus shutdown ramp limit", "shutdown ramp limit plus maximum generator output - this is the strangest case", "output (degenerate upper bound due to unit being off) # (0, 1) -", "m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \\ -1 * (", "global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, has_non_dispatchable_generators=False,", "rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): # a", "m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l", "m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz,", "a unit is shut down in # this interval, it must remain off-line", "RHS = 0 (degenerate upper bound) # (0, 1) - unit switching on:", 
"model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule", "model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators,", "m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses)", "the strangest case #NOTE: This may never be physically true, but if a", "time periods - if a unit is shut down in # this interval,", ">= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: # handle the", "* (1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >=", "* (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: # handle the final (MinimumUpTime[g] -", "n in m.TimePeriods if n >= t and n <= (t + value(m.MinimumUpTime[g])", "= ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle", "t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g,", "off: RHS = 0 (degenerate upper bound) # (0, 1) - unit switching", "is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint", "in m.Generators) if has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n", "(m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g,", "+ \\ 
m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] -", "import logging from pyomo.environ import * logger = logging.getLogger(__file__) eps = 1e-3 def", "constraint - m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def", "model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses,", "per-time period shutdown costs. def compute_shutdown_costs_rule(m, g, t): if t == 0: return", "\\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g,", "== value(m.NumTimePeriods): # return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <= \\ #", "the initial condition. @simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): #", "to the positive and negative parts of the mismatch def posneg_rule(m, b, t):", "- unit switching off: RHS = shutdown ramp limit # (1, 1) -", "#This version fixes the problem with ignoring initial conditions mentioned in the above", "* m.UnitOn[g, t] <= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g,", "m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g,", "<= m.PowerGenerated[g,t] def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m,", "off-line until the end of the time span. 
if t == 0: #", "for n in m.TimePeriods if n >= t and n <= (t +", "* m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split by (t-1,", "m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b,", "has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements)", "m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\", "t] - m.UnitOnT0[g])) for n in m.TimePeriods if n >= t) >= 0.0", "model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines,", "+ value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else:", "Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule)", "+ value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else:", "- (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g,", "(m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t])", "below are only positive if the unit was off in the previous time", "rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods,", "rule=enforce_up_time_constraints_subsequent) 
model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g,", "horizons are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n", "consecutive time periods that the unit is required to be on. if t", "has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################", "t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g])) else: return m.StartupCost[g, t]", "m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute the per-generator, per-time period", "t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] -", "def constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial =", "t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def", ") + \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1])", "t] <= sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t) )", "ptdf is not None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else:", "* (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 -", 
"{} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t]", "* m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \\ m.MaximumPowerOutput[g,", "happen when small time horizons are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t]", "t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b in", "m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\", "be on. if t == 0: return sum((1 - m.UnitOn[g, n]) for n", "<= (t + value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] -", "bound) # (0, 1) - unit switching on: RHS = standard ramp-down limit", "m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g,", "return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t in", "m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t)", "is started up in # this interval, it must remain on-line until the", "def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def power_balance(m,", "= shutdown ramp limit # (1, 1) - unit staying on: RHS =", "unit switching on: RHS = startup ramp limit # (1, 0) - unit", "- unit staying off: RHS = maximum generator output (degenerate upper bound) #", "for n in m.TimePeriods if n >= t) >= 0.0 else: return sum(((1", "m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l])", "=> # the value is the minimum number of subsequent consecutive time periods", "power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at each node (S) #", "t in 
m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) *", "== m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t]", "1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return sum(m.UnitOn[g, n]", "model.TimePeriods, rule=enforce_down_time_constraints_subsequent) def production_cost_function(m, g, t, x): # a function for use in", "def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods,", "model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits", "(m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,", "+ \\ -1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) )", "a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems # (1, 0)", "t == 0: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >=", "t] = 1 m.HotStart[g, t].fixed = True return Constraint.Skip else: return m.HotStart[g, t]", "+ m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in m.GenerationTimeInStage[st]) + \\", "return sum(m.UnitOn[g, t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0", "m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for", "maximum generator output minus shutdown ramp 
limit (degenerate upper bound) - this is", "if has_storage is True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage)", "Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1]", "maximum generator output - this is the strangest case #NOTE: This may never", "has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators, has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator =", "m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] -", "Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule) model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule) def objective_function(model): model.TotalCostObjective", "RHS = startup ramp limit # (1, 0) - unit switching off: RHS", "note if t == 0: # Not 100% sure of this one since", "each time period def compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]): if t", "(1 - m.UnitOn[g, t]) def enforce_max_available_ramp_down_rates_rule(m, g, t): # 4 cases, split by", "is True: constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage) if has_global_reserves", "costs for each generator, for each time period def compute_hot_start_rule(m, g, t): if", "limit plus power generated in previous time period if t == 0: return", "staying on: RHS = maximum generator output (degenerate upper bound) #NOTE: As expressed", "True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator) if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = 
Constraint(model.ReserveZones,", "the end of the time span. if t == 0: # can happen", "physically true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes", "pyomo.environ import * logger = logging.getLogger(__file__) eps = 1e-3 def fix_first_angle_rule(m,t, slack_bus=1): return", "return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g]))", "constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the", "constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators,", "= sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in", "since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t]", "not involving the initial condition. 
@simple_constraint_rule def enforce_down_time_constraints_subsequent(m, g, t): if t <=", "m.Storage) if has_global_reserves is True: constraint = constraint - m.ReserveRequirement[t] constraint = constraint", "unit status: # (0, 0) - unit staying off: RHS = maximum generator", "model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits", "constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint = constraint", "to unit off) # (1, 1) - unit staying on: RHS = standard", "* (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t]) def calculate_total_demand(m, t): return m.TotalDemand[t] == sum(m.Demand[b,t]", "t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\ # m.ShutdownRampLimit[g] *", "if value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g, t] for t in m.TimePeriods", "unit status # (0, 0) - unit staying off: RHS = 0 (degenerate", "t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g", "t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t] -", "ignoring initial conditions mentioned in the above note if t == 0: #", "def enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t):", "m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t] def enforce_reserve_requirements_rule(m, t, has_storage=False, 
has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t]", ">= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g", "+ \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g, t]", "- m.MinimumUpTime[g]) + 1): # the right-hand side terms below are only positive", "elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1): # the right-hand side terms", "model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def", "t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute the", "value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial constraint. 
return Constraint.Skip elif t <= (value(m.NumTimePeriods", "t+1]) #This version fixes the problem with ignoring initial conditions mentioned in the", "= constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return", "line_power_rule(m, l, t): if m.B[l] == 99999999: logger.debug(\" Line Power Angle constraint skipped", "- m.UnitOnT0[g]) else: return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] *", "b): return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return", "m.GenerationTimeInStage[st]) + \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st):", "functools import partial import logging from pyomo.environ import * logger = logging.getLogger(__file__) eps", "= Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods,", "for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st])", "model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet,", "return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g]", "m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) else: # handle the final (MinimumUpTime[g]", "Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def 
constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is", "initial conditions to t=1! #if t == value(m.NumTimePeriods): # return Constraint.Skip #else: #", "enforce_generator_output_limits_rule_part_b(m, g, t): return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] def enforce_generator_output_limits_rule_part_c(m, g, t): return", "has_global_reserves=has_global_reserves) model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements) if has_regulating_reserves is True: model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods,", "# the right-hand side terms below are only positive if the unit was", "case #NOTE: This may never be physically true, but if a generator has", "if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems # (1,", "= Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def", "RHS = maximum generator output (degenerate upper bound) #NOTE: As expressed in Carrion-Arroyo", "t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1 * (", "constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def", "here, this constraint does NOT consider ramp down from initial conditions to t=1!", "def enforce_down_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOffLine[g]): # handled by the EnforceDownTimeConstraintInitial", "m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute", "sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def 
pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for", "has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1): model.LinePowerConstraintLower =", "in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b", "= constraint + m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b,", "m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints,", "model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators,", "n in m.TimePeriods if n >= t) >= 0.0 else: return sum((m.UnitOn[g, n]", "upper bound) # (0, 1) - unit switching on: RHS = maximum generator", "n in m.TimePeriods if n >= t) >= 0.0 else: return sum(((1 -", "Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule)", "\\ -1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) +", "m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - 
m.UnitOn[g, t]) +", "delta from m.PowerGenerated[g, t-1]) # (0, 0) - unit staying off: RHS =", "initial condition. @simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled", "of this one since there is no MaximumPowerAvailableT0 return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g,", "t]) else: # handle the final (MinimumDownTime[g] - 1) time periods - if", "( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\ -1 * (", "off: RHS = shutdown ramp limit # (1, 1) - unit staying on:", "happen when small time horizons are specified return sum(((1 - m.UnitOn[g, n]) -", "# can happen when small time horizons are specified return sum((m.UnitOn[g, n] -", "power generated in previous time period if t == 0: return m.MaximumPowerAvailable[g, t]", "model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods,", "== m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t]", "t]) else: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n", "else: return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and", "+ \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t]", "0.0 def lower_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return -m.ThermalLimit[l]", "sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b])", "in Carrion-Arroyo and subsequently here, this constraint does NOT consider ramp down from", "* 
(m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g, t] *", "maximum generator output (degenerate upper bound) # (0, 1) - unit switching on:", "the value is the minimum number of subsequent consecutive time periods that the", "rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule) model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def constraint_up_down_time(model):", "g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \\ (sum(m.posLoadGenerateMismatch[b, t]", "m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g,", "rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates =", "- m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >=", "Constraint(model.Buses, model.TimePeriods, rule = posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model,", "= Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves", "m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: # handle the final (MinimumDownTime[g]", "st in m.StageSet) def 
constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus", "true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems", "<= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g,", "return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods if", ">= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return sum((1 - m.UnitOn[g,", "has_non_dispatchable_generators is True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b])", "elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the right-hand side terms", "in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if has_non_dispatchable_generators is", "= maximum generator output minus shutdown ramp limit (degenerate upper bound) - this", "\\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1", "(m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1]", "piecewise linearization of the cost function. 
return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def", "def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m,", "- m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods", "are specified return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in", "n]) for n in m.TimePeriods if n >= t and n <= (t", "startup costs for each generator, for each time period def compute_hot_start_rule(m, g, t):", "return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods,", "terms below are only positive if the unit was off in the previous", "if has_zonal_reserves is True: model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA =", "if a unit is started up in # this interval, it must remain", "g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t]", "generator output (degenerate upper bound due to unit being off) # (0, 1)", "value(m.MinimumUpTime[g]) - 1)) >= \\ m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) else: return", "periods - if a unit is shut down in # this interval, it", "t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t): # 4 cases, split by", "m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint def calculate_regulating_reserve_up_available_per_generator(m, g, t): return m.RegulatingReserveUpAvailable[g, t] ==", "Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = 
Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand)", "- unit staying on: RHS = maximum generator output (degenerate upper bound) #NOTE:", "sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods", "ramp limit plus power generated in previous time period if t == 0:", "b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if has_storage is True: constraint", "mentioned in the above note if t == 0: # Not 100% sure", "m.UnitOn[g, t-1]) def enforce_ramp_up_limits_rule(m, g, t): # 4 cases, split by (t-1, t)", "has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=True, has_regulating_reserves=True, has_zonal_reserves=False): if has_global_reserves is True: fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage,", "partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model, ptdf=None, slack_bus=1):", "t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) >", "- m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) else: return", "limit minus shutdown ramp limit plus maximum generator output - this is the", "one => # the value is the minimum number of subsequent consecutive time", "t == 0: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t]", "but on in this one => # the value is the minimum number", "0.0 # constraint for each time period after that not involving the initial", "(m.UnitOn[g, t] - m.UnitOnT0[g])) for n in 
m.TimePeriods if n >= t) >=", "partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def constraint_line(model,", "m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t] def enforce_zonal_reserve_requirement_rule(m, rz, t): return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz])", "m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip", "EnforceDownTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): #", "= constraint - m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t] return constraint", "has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if has_non_dispatchable_generators is True:", ">= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g]", "(m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split", "in m.TimePeriods if n >= t) >= 0.0 # constraint due to initial", "t])) for n in m.TimePeriods if n >= t) >= 0.0 else: return", "n >= t) >= 0.0 # constraint due to initial conditions. 
def enforce_down_time_constraints_initial(m,", "+ m.LoadGenerateMismatch[b,t] constraint = constraint - m.Demand[b, t] constraint = m.NetPowerInjectionAtBus[b, t] ==", "== 0: # Not 100% sure of this one since there is no", "in m.LinesFrom[b]) \\ + m.LoadGenerateMismatch[b,t] == 0 return constraint def net_power_at_bus_rule(m, b, t,", "and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip def", "0: return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >=", "model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule) def constraint_generator_power(model): model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a) model.EnforceGeneratorOutputLimitsPartB", "m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return", "Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule)", "switching on: RHS = maximum generator output minus shutdown ramp limit (degenerate upper", "else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps):", "\\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) +", "t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \\ m.MaximumPowerOutput[g, t]", ">= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t]", "production_cost_function(m, g, 
t, x): # a function for use in piecewise linearization of", "in # this interval, it must remain on-line until the end of the", "(m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return", "<= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each time period after that not", "sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in", "t): if m.B[l] == 99999999: logger.debug(\" Line Power Angle constraint skipped for line", "True: constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) constraint =", "constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1): # the", "(MinimumDownTime[g] - 1) time periods - if a unit is shut down in", "for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for", "value(m.InitialTimePeriodsOffLine[g]) == 0: return Constraint.Skip return sum(m.UnitOn[g, t] for t in m.TimePeriods if", "RHS = standard ramp limit plus power generated in previous time period if", "Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent) model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial) model.EnforceDownTimeConstraintsSubsequent =", "for each time period def compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]): if", "if has_global_reserves is True: constraint = constraint - m.ReserveRequirement[t] constraint = constraint ==", "as the delta from m.PowerGenerated[g, t-1]) # (0, 0) - unit staying off:", "output - this is the strangest case #NOTE: This may never be physically", "<= sum( m.UnitOn[g, i] for i in range(1, t) ) else: return m.HotStart[g,", "\\ sum(m.posGlobalLoadGenerateMismatch[t] + 
m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st]", "in m.TimePeriods if n >= t) >= 0.0 else: return sum(((1 - m.UnitOn[g,", "ramp limit plus maximum power output (degenerate upper bound due to unit off)", "model.TimePeriods, rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule)", "in m.TimePeriods if n >= t) >= 0.0 def commitment_in_stage_st_cost_rule(m, st): return m.CommitmentStageCost[st]", "+ m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] *", "for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b]) if", "rule=compute_startup_costs_rule_minusM) model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule) model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule) model.Compute_generation_in_stage_st_cost", "\\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1 *", "\\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \\", "g, t): if t <= value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g,", "= partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def", "g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for", "constraint def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False): constraint = sum((1 - m.GeneratorForcedOutage[g,t]) *", 
"on: RHS = standard ramp-down limit minus shutdown ramp limit plus maximum generator", "= standard ramp-down limit if t == 0: return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t]", "in m.TimePeriods if n >= t and n <= (t + value(m.MinimumDownTime[g]) -", "-m.ThermalLimit[l] <= m.LinePower[l, t] else: return Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l]", "constraint does NOT consider ramp down from initial conditions to t=1! #if t", "fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def constraint_reserves(model, has_storage=False,", "(m.UnitOn[g, t-1] - m.UnitOn[g, t]) else: # handle the final (MinimumDownTime[g] - 1)", "(0, 1) - unit switching on: RHS = standard ramp-down limit minus shutdown", "may never be physically true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput,", "ramp limit (degenerate upper bound) - this is the strangest case. # (1,", "linearization of the cost function. 
return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model):", "st): return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st", "t] * m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])", "(degenerate upper bound due to unit being off) # (0, 1) - unit", "# return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \\", "split by (t, t+1) unit status # (0, 0) - unit staying off:", "for t in m.TimePeriods)) >= 0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): #", ") else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(t", "problem with ignoring initial conditions mentioned in the above note if t ==", "m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] -", "b, time t (S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in", "value(m.InitialTimePeriodsOnLine[g])) == 0.0 # constraint for each time period after that not involving", "return Constraint.Skip else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in", "+ \\ sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return", "m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g,", "st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for t in", "required to be on. if t == 0: return sum(m.UnitOn[g, n] for n", "for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g,", "by the EnforceDownTimeConstraintInitial constraint. 
return Constraint.Skip elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) +", ">= t and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g]", "# handled by the EnforceDownTimeConstraintInitial constraint. return Constraint.Skip elif t <= (value(m.NumTimePeriods -", "constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \\ - sum(m.PowerInputStorage[s, t] for", "status: # (0, 0) - unit staying off: RHS = maximum generator output", "# (0, 1) - unit switching on: RHS = startup ramp limit #", "t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \\ - sum(m.LinePower[l,t] for l in", "t): return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] def enforce_max_available_ramp_up_rates_rule(m, g, t):", "m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) if has_storage is True:", "t+1) unit status # (0, 0) - unit staying off: RHS = 0", "upper bound) # (0, 1) - unit switching on: RHS = standard ramp-down", "that not involving the initial condition. 
@simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t", "m.HotStart[g, t] = 1 m.HotStart[g, t].fixed = True return Constraint.Skip else: return m.HotStart[g,", "constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule)", "rule=partial_fix_first_angle_rule) model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule) def constraint_total_demand(model): model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand) def", "t] def enforce_generator_output_limits_rule_part_a(m, g, t): return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t]", "line between {} and {} \".format(m.BusFrom[l], m.BusTo[l])) return Constraint.Skip else: return m.LinePower[l,t] ==", "\\ - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) # compute the per-generator,", "the right-hand side terms below are only positive if the unit was off", "+ m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st])) def StageCost_rule(m, st): return m.StageCost[st] == m.GenerationStageCost[st]", "Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule) model.Defineposneg_Mismatch", "= Constraint(model.TimePeriods, rule=calculate_total_demand) def constraint_load_generation_mismatch(model): model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule) 
model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule)", "t-1] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \\ m.MaximumPowerOutput[g,", "unit staying on: RHS = standard ramp-down limit if t == 0: return", "Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf is not None: model.PTDF = ptdf model.CalculateLinePower =", "RHS = standard ramp limit minus startup ramp limit plus maximum power output", "involving the initial condition. @simple_constraint_rule def enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]):", "sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False):", "the unit is required to be on. if t == 0: return sum(m.UnitOn[g,", "Constraint.Skip def upper_line_power_bounds_rule(m, l, t): if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps): return m.ThermalLimit[l]", "has_storage=False, has_non_dispatchable_generators=False, has_global_reserves=False): constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if has_non_dispatchable_generators", "* ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) ) else: return m.PowerGenerated[g, t-1]", "this one => # the value is the minimum number of subsequent consecutive", "value(m.NumTimePeriods): # return Constraint.Skip #else: # return m.MaximumPowerAvailable[g, t] <= \\ # m.MaximumPowerOutput[g]", "ramp limit # (1, 1) - unit staying on: RHS = maximum generator", "0) - unit staying off: RHS = maximum generator output (degenerate upper bound)", "def compute_shutdown_costs_rule(m, g, t): if t == 0: return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g]", "0.0 def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False): # Power balance at each node", "there is no MaximumPowerAvailableT0 return 
m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] +", "- 1) time periods - if a unit is shut down in #", "the cost function. return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g] def constraint_for_cost(model): model.ComputeProductionCosts =", "for n in m.TimePeriods if n >= t) >= 0.0 # constraint due", "line_power_ptdf_rule(m, l, t): return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i,", "+ m.CommitmentStageCost[st] def total_cost_objective_rule(m): return sum(m.StageCost[st] for st in m.StageSet) def constraint_net_power(model, has_storage=False,", "to be on. if t == 0: return sum((1 - m.UnitOn[g, n]) for", "- m.UnitOn[g, t]) else: return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \\ m.NominalRampUpLimit[g]", "m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \\ -1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g]", "has_non_dispatchable_generators=False): partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule) ################################################ def", "t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g,", "partial import logging from pyomo.environ import * logger = logging.getLogger(__file__) eps = 1e-3", "started up in # this interval, it must remain on-line until the end", "* m.UnitOn[g, t] + \\ m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) +", "generation_in_stage_st_cost_rule(m, st): return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for t", "m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) else: return m.ShutdownCost[g, t]", "m.Generators) if has_non_dispatchable_generators is True: constraint = constraint 
+ sum(m.NondispatchablePowerUsed[n,t] for n in", "g, t, x): # a function for use in piecewise linearization of the", "slack_bus=1): model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule) model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule) if ptdf", "(m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if n >= t)", "Constraint.Skip else: return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(1,", "(S) # bus b, time t (S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t]", "= sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) if has_non_dispatchable_generators is True: constraint =", "0: return Constraint.Skip return sum(m.UnitOn[g, t] for t in m.TimePeriods if t <=", "sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and n <=", "= posneg_rule) model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance", "generator output - this is the strangest case #NOTE: This may never be", "the positive and negative parts of the mismatch def posneg_rule(m, b, t): return", "t]) def enforce_up_time_constraints_initial(m, g): if value(m.InitialTimePeriodsOnLine[g]) == 0: return Constraint.Skip return sum((1 -", "# bus b, time t (S) constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for", "<= sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t) ) def", "\\ # m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1]) #This version fixes the", "ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus) model.FixFirstAngle =", "t) unit status (RHS is defined as the delta from m.PowerGenerated[g, 
t-1]) #", "time period def compute_hot_start_rule(m, g, t): if t <= value(m.ColdStartHours[g]): if t -", "rule=enforce_generator_output_limits_rule_part_b) model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c) model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates =", "has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators) model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance) def", "value(m.ColdStartHours[g]): if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed", "<= m.PowerGeneratedT0[g] + \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t]", "+ \\ m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \\ m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g])", "return m.PowerGeneratedT0[g] <= \\ m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g]", "has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems # (1, 0) - unit", "(0, 0) - unit staying off: RHS = 0 (degenerate upper bound) #", "* ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \\ -1", "upper bound due to unit off) # (1, 1) - unit staying on:", "is not None: model.PTDF = ptdf model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule) else: partial_fix_first_angle_rule", "n in m.TimePeriods if n >= t) >= 0.0 # constraint due to", "Constraint(model.TimePeriods, rule = global_posneg_rule) def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False): fn_power_balance = partial(power_balance, 
has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators)", "remain on-line until the end of the time span. if t == 0:", "\\ m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t]) else: return m.MaximumPowerAvailable[g, t-1] <= \\ m.MaximumPowerOutput[g,", "a unit is started up in # this interval, it must remain on-line", "off in the previous time period but on in this one => #", "enforce_up_time_constraints_subsequent(m, g, t): if t <= value(m.InitialTimePeriodsOnLine[g]): # handled by the EnforceUpTimeConstraintInitial constraint.", "# (1, 1) - unit staying on: RHS = maximum generator output (degenerate", "t in m.TimePeriods)) >= 0.0 def pos_load_generate_mismatch_tolerance_rule(m, b): return sum((m.posLoadGenerateMismatch[b,t] for t in", "# handle the final (MinimumDownTime[g] - 1) time periods - if a unit", "- m.UnitOn[g, t-1]) else: # handle the final (MinimumUpTime[g] - 1) time periods", "n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \\ m.MinimumDownTime[g] * (m.UnitOnT0[g] -", "<= value(m.UnitOnT0State[g]): m.HotStart[g, t] = 1 m.HotStart[g, t].fixed = True return Constraint.Skip else:", "parts of the mismatch def posneg_rule(m, b, t): return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b,", "m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t] def global_posneg_rule(m, t): return m.posGlobalLoadGenerateMismatch[t]", "m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \\ -1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t]", "in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength", "has_global_reserves is True: constraint = constraint - m.ReserveRequirement[t] constraint = constraint == m.TotalDemand[t]", "else: # handle the final (MinimumUpTime[g] - 1) time periods - if a", "Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule) def 
constraint_up_down_time(model): model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial) model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods,", "return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \\ m.NominalRampDownLimit[g] * m.UnitOn[g, t] +", "model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule) model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule) model.EnforceNominalRampDownLimits = Constraint(model.Generators,", "- m.UnitOnT0[g])) else: return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t]", "m.ThermalLimit[l] >= m.LinePower[l, t] else: return Constraint.Skip def line_power_ptdf_rule(m, l, t): return m.LinePower[l,t]", "m.UnitOn[g, t]) def enforce_ramp_down_limits_rule(m, g, t): # 4 cases, split by (t-1, t)" ]
[ "usb.core except ImportError: print( \"module usb.core not found, try: pip%d install --user pyusb\"", "delay): self.write(6, pattern, delay, 0, 0, 0, 0) def off(self): self.write(6, 0, 0,", "} def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\")", "print( \"module usb.core not found, try: pip%d install --user pyusb\" % sys.version_info[0] )", ") sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value) return", "pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args() luxafor = Luxafor(get_device()) if", "pattern, delay, 0, 0, 0, 0) def off(self): self.write(6, 0, 0, 0, 0,", "args.color in COLORS: args.color = COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif", "lv // 3)) class Luxafor: def __init__(self, dev): self.dev = dev def set_color(self,", "0, 0) def off(self): self.write(6, 0, 0, 0, 0, 0) def get_device(): device", "usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not find device! 
Is the Luxafor connected?\")", "args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in", "return tuple(int(value[i : i + lv // 3], 16) for i in range(0,", "red, green, blue = hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration, 0) def", "lv, lv // 3)) class Luxafor: def __init__(self, dev): self.dev = dev def", "0, 0, 0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device:", "__init__(self, dev): self.dev = dev def set_color(self, led, hex_color): red, green, blue =", "type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int,", "(\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern,", "args.color, args.delay, args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif", "in range(0, lv, lv // 3)) class Luxafor: def __init__(self, dev): self.dev =", "% sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv =", "\"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\",", "= hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0, 0) def write(self, *values): self.dev.write(1,", "= subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\",", "i 
+ lv // 3], 16) for i in range(0, lv, lv //", "green, blue, 0, delay, repeat) def pattern(self, pattern, delay): self.write(6, pattern, delay, 0,", "http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value) return tuple(int(value[i : i + lv", "def write(self, *values): self.dev.write(1, values) self.dev.write(1, values) def fade(self, led, hex_color, duration): red,", "device! Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return", "wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\",", "not found, try: pip%d install --user pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value):", "hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0, 0) def write(self, *values): self.dev.write(1, values)", "dev def set_color(self, led, hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1, led, red,", "\"--delay\", type=int, default=25) args = parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in args", "args.color, args.duration) elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action", "elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" % args.action)", "\"color\" in args and args.color in COLORS: args.color = COLORS[args.color] if args.action ==", "subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser", "elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in 
(\"fade-to-color\", \"f\"): luxafor.fade(args.led,", "subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int,", "elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\",", "delay, repeat) def pattern(self, pattern, delay): self.write(6, pattern, delay, 0, 0, 0, 0)", "not find device! Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass", "= Luxafor(get_device()) if \"color\" in args and args.color in COLORS: args.color = COLORS[args.color]", "def fade(self, led, hex_color, duration): red, green, blue = hex_to_rgb(hex_color) self.write(2, led, red,", "import usb.core except ImportError: print( \"module usb.core not found, try: pip%d install --user", "get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not find device! 
Is", "type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args() luxafor = Luxafor(get_device()) if \"color\"", "type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\",", "hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration, 0) def strobe(self, led, hex_color, delay,", "% sys.version_info[:2]) ) import argparse try: import usb.core except ImportError: print( \"module usb.core", "self.write(1, led, red, green, blue, 0, 0) def write(self, *values): self.dev.write(1, values) self.dev.write(1,", "strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\",", "\"ffffff\", \"off\": \"000000\", } def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1)", "subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args() luxafor =", "3)) class Luxafor: def __init__(self, dev): self.dev = dev def set_color(self, led, hex_color):", "args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in (\"wave\", \"w\"):", "color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser", "args = parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in args and args.color in", "print(\"Could not find device! 
Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError:", "led, hex_color, duration): red, green, blue = hex_to_rgb(hex_color) self.write(2, led, red, green, blue,", "ImportError: print( \"module usb.core not found, try: pip%d install --user pyusb\" % sys.version_info[0]", "self.dev.write(1, values) self.dev.write(1, values) def fade(self, led, hex_color, duration): red, green, blue =", "0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not", "from __future__ import print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) )", "/home/andreas/code/dotfiles/.venv/bin/python from __future__ import print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2])", "0, 0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could", "blue, duration, 0) def strobe(self, led, hex_color, delay, repeat): red, green, blue =", "import print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse", "blue, 0, 0) def write(self, *values): self.dev.write(1, values) self.dev.write(1, values) def fade(self, led,", "luxafor = Luxafor(get_device()) if \"color\" in args and args.color in COLORS: args.color =", "type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\",", "sys.version_info[:2]) ) import argparse try: import usb.core except ImportError: print( \"module usb.core not", "red, green, blue, 0, 0) def write(self, *values): self.dev.write(1, values) self.dev.write(1, values) 
def", "def __init__(self, dev): self.dev = dev def set_color(self, led, hex_color): red, green, blue", "parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\",", "green, blue = hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay, 0, repeat) def", "off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",))", "(\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay)", "in COLORS: args.color = COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action", "0) def off(self): self.write(6, 0, 0, 0, 0, 0) def get_device(): device =", "sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try: import usb.core", "type=int, default=25) args = parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in args and", "self.dev.write(1, values) def fade(self, led, hex_color, duration): red, green, blue = hex_to_rgb(hex_color) self.write(2,", "args and args.color in COLORS: args.color = COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led,", "type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\",", "fade(self, led, hex_color, duration): red, green, blue = hex_to_rgb(hex_color) self.write(2, led, red, green,", "default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, 
default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int)", "= argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser", ") import argparse try: import usb.core except ImportError: print( \"module usb.core not found,", "args.delay) else: print(\"Unknown action: %r\" % args.action) sys.exit(1) if __name__ == \"__main__\": main()", "def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser", "len(value) return tuple(int(value[i : i + lv // 3], 16) for i in", "find device! Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration()", "= subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\",", "hex_color, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3, led, red, green, blue,", "= dev def set_color(self, led, hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1, led,", "pattern, red, green, blue, 0, delay, repeat) def pattern(self, pattern, delay): self.write(6, pattern,", "repeat) def pattern(self, pattern, delay): self.write(6, pattern, delay, 0, 0, 0, 0) def", "fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\",", "= hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0, 
delay, repeat) def pattern(self, pattern,", "pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args()", "blue = hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0, 0) def write(self, *values):", "main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser =", "hex_color, pattern, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green,", "except usb.core.USBError: pass device.set_configuration() return device COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\",", "\"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int)", "0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not find", "red, green, blue, delay, 0, repeat) def wave(self, hex_color, pattern, delay, repeat): red,", "args.repeat) elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" %", "\"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else:", "led, red, green, blue, duration, 0) def strobe(self, led, hex_color, delay, repeat): red,", "delay, 0, repeat) def wave(self, hex_color, pattern, delay, repeat): red, green, blue =", "sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device COLORS = { \"red\":", "usb.core.USBError: pass device.set_configuration() return device COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\":", 
"Luxafor(get_device()) if \"color\" in args and args.color in COLORS: args.color = COLORS[args.color] if", "import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try: import", "aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser =", "pip%d install --user pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value", "default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str)", "= subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100)", "elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in (\"wave\",", "device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not find device! 
Is the", "= parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in args and args.color in COLORS:", "pattern, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue,", "\"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str)", "\"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", }", "\"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay,", "print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try:", "\"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat)", "aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25)", "if not device: print(\"Could not find device! 
Is the Luxafor connected?\") sys.exit(1) try:", "parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\",", "fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\",", "*values): self.dev.write(1, values) self.dev.write(1, values) def fade(self, led, hex_color, duration): red, green, blue", "wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\",", "argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser =", "class Luxafor: def __init__(self, dev): self.dev = dev def set_color(self, led, hex_color): red,", "delay, 0, 0, 0, 0) def off(self): self.write(6, 0, 0, 0, 0, 0)", "== \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action", "\"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in", "pattern, delay): self.write(6, pattern, delay, 0, 0, 0, 0) def off(self): self.write(6, 0,", "color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\",", "luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\", \"s\"): 
luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif", "in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color,", "i in range(0, lv, lv // 3)) class Luxafor: def __init__(self, dev): self.dev", "type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser =", "args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif", "delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay,", "Luxafor: def __init__(self, dev): self.dev = dev def set_color(self, led, hex_color): red, green,", "# http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value) return tuple(int(value[i : i +", "hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0, delay, repeat) def pattern(self, pattern, delay):", "= usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not find device! 
Is the Luxafor", "install --user pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value =", "in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration)", "try: import usb.core except ImportError: print( \"module usb.core not found, try: pip%d install", "os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try: import usb.core except ImportError: print( \"module", "luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown", "default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\",", "(\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif", "try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device COLORS = { \"red\": \"ff0000\",", "0, 0, 0, 0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not", "\"000000\", } def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers =", "\"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\",", "blue, 0, delay, repeat) def pattern(self, pattern, delay): self.write(6, pattern, delay, 0, 0,", "0, repeat) def wave(self, hex_color, pattern, delay, repeat): red, green, blue = hex_to_rgb(hex_color)", "= len(value) return tuple(int(value[i : i + lv // 3], 16) for i", "0, 0, 0) def off(self): 
self.write(6, 0, 0, 0, 0, 0) def get_device():", "= COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"):", "blue = hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration, 0) def strobe(self, led,", "self.write(3, led, red, green, blue, delay, 0, repeat) def wave(self, hex_color, pattern, delay,", "values) self.dev.write(1, values) def fade(self, led, hex_color, duration): red, green, blue = hex_to_rgb(hex_color)", ": i + lv // 3], 16) for i in range(0, lv, lv", "16) for i in range(0, lv, lv // 3)) class Luxafor: def __init__(self,", "// 3)) class Luxafor: def __init__(self, dev): self.dev = dev def set_color(self, led,", "in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" % args.action) sys.exit(1) if", "device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device COLORS = { \"red\": \"ff0000\", \"green\":", "subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\",", "strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\",", "COLORS: args.color = COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in", "led, hex_color, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3, led, red, green,", "green, blue = hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration, 0) def strobe(self,", "{ \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", 
\"white\": \"ffffff\",", "\"white\": \"ffffff\", \"off\": \"000000\", } def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int,", "elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\", \"s\"):", "parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in args and args.color in COLORS: args.color", "0) def strobe(self, led, hex_color, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3,", "argparse try: import usb.core except ImportError: print( \"module usb.core not found, try: pip%d", "red, green, blue, duration, 0) def strobe(self, led, hex_color, delay, repeat): red, green,", "device: print(\"Could not find device! Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except", "strobe(self, led, hex_color, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3, led, red,", "0, 0, 0, 0) def off(self): self.write(6, 0, 0, 0, 0, 0) def", "Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device", "args.color) elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\",", "in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color,", "hex_color, duration): red, green, blue = hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration,", "and args.color in COLORS: args.color = COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\")", "\"module usb.core not found, try: pip%d install --user pyusb\" % sys.version_info[0] ) sys.exit(1)", "value.lstrip(\"#\") lv = len(value) return tuple(int(value[i : i + lv // 3], 16)", "args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"): 
luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action:", "self.write(4, pattern, red, green, blue, 0, delay, repeat) def pattern(self, pattern, delay): self.write(6,", "usb.core not found, try: pip%d install --user pyusb\" % sys.version_info[0] ) sys.exit(1) def", "strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int,", "write(self, *values): self.dev.write(1, values) self.dev.write(1, values) def fade(self, led, hex_color, duration): red, green,", "strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\",", "--user pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\")", "try: pip%d install --user pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657", "duration): red, green, blue = hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration, 0)", "wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int,", "args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" % args.action) sys.exit(1)", "__future__ import print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import", "= hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay, 0, repeat) def wave(self, hex_color,", "values) def 
fade(self, led, hex_color, duration): red, green, blue = hex_to_rgb(hex_color) self.write(2, led,", "for i in range(0, lv, lv // 3)) class Luxafor: def __init__(self, dev):", "repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0, delay,", "pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in", "pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv", "luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in", "args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"):", "off(self): self.write(6, 0, 0, 0, 0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372)", "type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",))", "aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args() luxafor = Luxafor(get_device())", "self.write(6, pattern, delay, 0, 0, 0, 0) def off(self): self.write(6, 0, 0, 0,", "def strobe(self, led, hex_color, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3, led,", "(\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay,", "\"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\",", "\"--led\", type=int, default=1) subparsers = 
parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",))", "hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value) return tuple(int(value[i : i", "subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str)", "connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device COLORS = {", "= hex_to_rgb(hex_color) self.write(2, led, red, green, blue, duration, 0) def strobe(self, led, hex_color,", "(\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" % args.action) sys.exit(1) if __name__", "idProduct=0xF372) if not device: print(\"Could not find device! 
Is the Luxafor connected?\") sys.exit(1)", "def pattern(self, pattern, delay): self.write(6, pattern, delay, 0, 0, 0, 0) def off(self):", "def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if not device: print(\"Could not find device!", "red, green, blue, 0, delay, repeat) def pattern(self, pattern, delay): self.write(6, pattern, delay,", "luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat)", "\"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def main(): parser", "= parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser =", "in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern,", "type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\",", "Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device COLORS =", "aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100)", "args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led,", "wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", 
type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser", "blue = hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay, 0, repeat) def wave(self,", "fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int,", "found, try: pip%d install --user pyusb\" % sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): #", "value = value.lstrip(\"#\") lv = len(value) return tuple(int(value[i : i + lv //", "def set_color(self, led, hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1, led, red, green,", "blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0, delay, repeat) def pattern(self,", "lv // 3], 16) for i in range(0, lv, lv // 3)) class", "\"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\",", "= { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\":", "device.set_configuration() return device COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\":", "not device: print(\"Could not find device! 
Is the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0)", "red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0, delay, repeat)", "parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\")", "subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int,", "self.write(2, led, red, green, blue, duration, 0) def strobe(self, led, hex_color, delay, repeat):", "tuple(int(value[i : i + lv // 3], 16) for i in range(0, lv,", "#!/usr/bin/env /home/andreas/code/dotfiles/.venv/bin/python from __future__ import print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" %", "green, blue, 0, 0) def write(self, *values): self.dev.write(1, values) self.dev.write(1, values) def fade(self,", "hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay, 0, repeat) def wave(self, hex_color, pattern,", "type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",))", "pattern(self, pattern, delay): self.write(6, pattern, delay, 0, 0, 0, 0) def off(self): self.write(6,", "args.delay, args.repeat) elif args.action in (\"wave\", \"w\"): luxafor.wave(args.color, args.pattern, args.delay, args.repeat) elif args.action", "type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args", "type=int, default=25) 
strobe_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) wave_parser.add_argument(\"pattern\",", "led, red, green, blue, delay, 0, repeat) def wave(self, hex_color, pattern, delay, repeat):", "sys.version_info[0] ) sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value)", "red, green, blue = hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0, 0) def", "aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str)", "0, 0) def write(self, *values): self.dev.write(1, values) self.dev.write(1, values) def fade(self, led, hex_color,", "duration, 0) def strobe(self, led, hex_color, delay, repeat): red, green, blue = hex_to_rgb(hex_color)", "COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led,", "self.dev = dev def set_color(self, led, hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1,", "self.write(6, 0, 0, 0, 0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8, idProduct=0xF372) if", "COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\",", "green, blue, duration, 0) def strobe(self, led, hex_color, delay, repeat): red, green, blue", "0) def write(self, *values): self.dev.write(1, values) self.dev.write(1, values) def fade(self, led, hex_color, duration):", "hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0, 0)", "\"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\", \"f\"):", 
"import argparse try: import usb.core except ImportError: print( \"module usb.core not found, try:", "\"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25)", "= subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\", type=str) fade_parser.add_argument(\"-d\", \"--duration\", type=int, default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",))", "set_color(self, led, hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1, led, red, green, blue,", "if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color)", "luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" % args.action) sys.exit(1) if __name__ == \"__main__\":", "subparsers = parser.add_subparsers(dest=\"action\") off_parser = subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser", "// 3], 16) for i in range(0, lv, lv // 3)) class Luxafor:", "device COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\":", "= subparsers.add_parser(\"off\") color_parser = subparsers.add_parser(\"color\", aliases=(\"c\",)) color_parser.add_argument(\"color\", type=str) fade_parser = subparsers.add_parser(\"fade-to-color\", aliases=(\"f\",)) fade_parser.add_argument(\"color\",", "delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0,", "\"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\" % args.action) sys.exit(1) if __name__ ==", "wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", type=str) 
wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\",", "default=25) strobe_parser = subparsers.add_parser(\"strobe\", aliases=(\"s\",)) strobe_parser.add_argument(\"color\", type=str) strobe_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) strobe_parser.add_argument(\"-r\", \"--repeat\",", "\"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def", "range(0, lv, lv // 3)) class Luxafor: def __init__(self, dev): self.dev = dev", "the Luxafor connected?\") sys.exit(1) try: device.detach_kernel_driver(0) except usb.core.USBError: pass device.set_configuration() return device COLORS", "\"c\"): luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color, args.duration) elif args.action", "\"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\":", "except ImportError: print( \"module usb.core not found, try: pip%d install --user pyusb\" %", "args.duration) elif args.action in (\"strobe\", \"s\"): luxafor.strobe(args.led, args.color, args.delay, args.repeat) elif args.action in", "import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try: import usb.core except", "os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try: import usb.core except ImportError:", "\"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def main(): parser =", "repeat): red, green, blue = hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay, 0,", "default=100) wave_parser = subparsers.add_parser(\"wave\", aliases=(\"w\",)) wave_parser.add_argument(\"color\", 
type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25)", "green, blue, delay, 0, repeat) def wave(self, hex_color, pattern, delay, repeat): red, green,", "type=str) wave_parser.add_argument(\"pattern\", type=int) wave_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) wave_parser.add_argument(\"-r\", \"--repeat\", type=int, default=100) pattern_parser =", "0, delay, repeat) def pattern(self, pattern, delay): self.write(6, pattern, delay, 0, 0, 0,", "in args and args.color in COLORS: args.color = COLORS[args.color] if args.action == \"off\":", "wave(self, hex_color, pattern, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red,", "def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value) return tuple(int(value[i :", "led, hex_color): red, green, blue = hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0,", "3], 16) for i in range(0, lv, lv // 3)) class Luxafor: def", "args.color = COLORS[args.color] if args.action == \"off\": luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\",", "\"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def main(): parser = argparse.ArgumentParser()", "+ lv // 3], 16) for i in range(0, lv, lv // 3))", "return device COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\", \"purple\": \"ff00ff\",", "args.delay, args.repeat) elif args.action in (\"pattern\", \"p\"): luxafor.pattern(args.pattern, args.delay) else: print(\"Unknown action: %r\"", "<reponame>anlutro/dotfiles #!/usr/bin/env /home/andreas/code/dotfiles/.venv/bin/python from __future__ import print_function import sys import os.path sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\"", "sys.path.append( os.path.expanduser(\"~/.local/lib/python%d.%d/site-packages\" % sys.version_info[:2]) ) import argparse try: import 
usb.core except ImportError: print(", "green, blue = hex_to_rgb(hex_color) self.write(1, led, red, green, blue, 0, 0) def write(self,", "\"blue\": \"0000ff\", \"purple\": \"ff00ff\", \"yellow\": \"ff9900\", \"white\": \"ffffff\", \"off\": \"000000\", } def main():", "repeat) def wave(self, hex_color, pattern, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4,", "luxafor.set_color(args.led, \"#000000\") elif args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\",", "\"off\": \"000000\", } def main(): parser = argparse.ArgumentParser() parser.add_argument(\"-l\", \"--led\", type=int, default=1) subparsers", "led, red, green, blue, 0, 0) def write(self, *values): self.dev.write(1, values) self.dev.write(1, values)", "def wave(self, hex_color, pattern, delay, repeat): red, green, blue = hex_to_rgb(hex_color) self.write(4, pattern,", "default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args =", "sys.exit(1) def hex_to_rgb(value): # http://stackoverflow.com/a/214657 value = value.lstrip(\"#\") lv = len(value) return tuple(int(value[i", "default=25) args = parser.parse_args() luxafor = Luxafor(get_device()) if \"color\" in args and args.color", "red, green, blue = hex_to_rgb(hex_color) self.write(3, led, red, green, blue, delay, 0, repeat)", "= subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25) args = parser.parse_args() luxafor", "if \"color\" in args and args.color in COLORS: args.color = COLORS[args.color] if args.action", "blue, delay, 0, repeat) def wave(self, hex_color, pattern, delay, repeat): red, green, blue", "= value.lstrip(\"#\") lv = len(value) return tuple(int(value[i : i + lv // 3],", "def off(self): 
self.write(6, 0, 0, 0, 0, 0) def get_device(): device = usb.core.find(idVendor=0x04D8,", "dev): self.dev = dev def set_color(self, led, hex_color): red, green, blue = hex_to_rgb(hex_color)", "pass device.set_configuration() return device COLORS = { \"red\": \"ff0000\", \"green\": \"00ff00\", \"blue\": \"0000ff\",", "args.action in (\"color\", \"c\"): luxafor.set_color(args.led, args.color) elif args.action in (\"fade-to-color\", \"f\"): luxafor.fade(args.led, args.color,", "\"--repeat\", type=int, default=100) pattern_parser = subparsers.add_parser(\"pattern\", aliases=(\"p\",)) pattern_parser.add_argument(\"pattern\", type=int) pattern_parser.add_argument(\"-d\", \"--delay\", type=int, default=25)", "lv = len(value) return tuple(int(value[i : i + lv // 3], 16) for", "green, blue = hex_to_rgb(hex_color) self.write(4, pattern, red, green, blue, 0, delay, repeat) def" ]
[ "UTF-8 -*- from okcoin.client import PrivateClient as OkClient from config import settings API_KEY", "OkClient from config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client =", "-*- coding: UTF-8 -*- from okcoin.client import PrivateClient as OkClient from config import", "-*- from okcoin.client import PrivateClient as OkClient from config import settings API_KEY =", "settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->' +", "coding: UTF-8 -*- from okcoin.client import PrivateClient as OkClient from config import settings", "= settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->' + str(client.ticker('ltc_cny'))) print('depth--------->' + str(client.depth('btc_cny'))) print('account------->'", "API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->' + str(client.ticker('ltc_cny')))", "#!/usr/bin/python # -*- coding: UTF-8 -*- from okcoin.client import PrivateClient as OkClient from", "from okcoin.client import PrivateClient as OkClient from config import settings API_KEY = settings.OKCOIN_API_KEY", "config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET)", "client = OkClient(API_KEY, API_SECRET) print('ticker--------->' + str(client.ticker('ltc_cny'))) print('depth--------->' + str(client.depth('btc_cny'))) print('account------->' + str(client.account()))", "import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->'", "import PrivateClient as OkClient from config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET =", "settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) 
print('ticker--------->' + str(client.ticker('ltc_cny'))) print('depth--------->' +", "PrivateClient as OkClient from config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET", "API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->' + str(client.ticker('ltc_cny'))) print('depth--------->' + str(client.depth('btc_cny')))", "<filename>example/ex_ok.py<gh_stars>0 #!/usr/bin/python # -*- coding: UTF-8 -*- from okcoin.client import PrivateClient as OkClient", "okcoin.client import PrivateClient as OkClient from config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET", "= settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->' + str(client.ticker('ltc_cny'))) print('depth--------->'", "# -*- coding: UTF-8 -*- from okcoin.client import PrivateClient as OkClient from config", "settings.OKCOIN_API_SECRET client = OkClient(API_KEY, API_SECRET) print('ticker--------->' + str(client.ticker('ltc_cny'))) print('depth--------->' + str(client.depth('btc_cny'))) print('account------->' +", "from config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client = OkClient(API_KEY,", "as OkClient from config import settings API_KEY = settings.OKCOIN_API_KEY API_SECRET = settings.OKCOIN_API_SECRET client" ]
[ "= unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header", "= [Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u) assert m == [[\"abc\", \"a\",", "class A: _marshal_list_row_header = \"a\" def __init__(self, a, b): self.a = a self.b", "== 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def", "m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a", "u = [Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u) assert m == [[\"abc\",", "== ['a', 1], m assert m[1] == ['b', 2], m u = unmarshal_csv(m,", "Test: def __init__(self, a, b): self.a = a self.b = b u =", "yield x yield self.c d = D([A(1, 2)], [B(3, 4)], C(5, 6)) m", "assert u.a == 1, u.a assert u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func", "m = marshal_csv(a) assert m[0] == ['a', 1], m assert m[1] == ['b',", "_marshal_list_row_header = \"abc\" def __init__(self, a, b): self.a = a self.b = b", "= \"abc\" def __init__(self, a, b): self.a = a self.b = b u", "= a self.b = b class C: _marshal_list_row_header = \"c\" def __init__(self, a,", "u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class", "_marshal_list_row_header = \"c\" def __init__(self, a, b): self.a = a self.b = b", "= a self.b = b self.c = c def __iter__(self): for x in", "= b self.c = c def __iter__(self): for x in self.a: yield x", "A: _unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type': object}, } def __init__(self, a):", "= A(1, 2) m = marshal_csv(a) assert m[0] == ['a', 1], m assert", "_marshal_csv_dict = True def __init__(self, a, b): self.a = a self.b = b", "self.a = a self.b = b a = A(1, 2) m = marshal_csv(a)", "m = marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0] def", "b): self.a = a self.b = b a = 
A(1, 2) m =", "m[1] == ['b', 2], m u = unmarshal_csv(m, A) assert u.a == 1,", "= unmarshal_csv(m, A) assert u.a == 1, u.a assert u.b == 2, u.b", "Test) assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\"", "A: _marshal_list_row_header = \"a\" def __init__(self, a, b): self.a = a self.b =", "test_marshal_unmarshal_list(): class Test: def __init__(self, a, b): self.a = a self.b = b", "2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def", "u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('') is None assert func('23') ==", "marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class", "def __init__(self, a, b): self.a = a self.b = b class C: _marshal_list_row_header", "'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}} def __init__(self, a,", "c def __iter__(self): for x in self.a: yield x for x in self.b:", "__init__(self, a, b): self.a = a self.b = b class C: _marshal_list_row_header =", "__iter__(self): for x in self.a: yield x for x in self.b: yield x", "b a = A(1, 2) m = marshal_csv(a) assert m[0] == ['a', 1],", "class B: def __init__(self, a, b): self.a = a self.b = b class", "== 1, u.a assert u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int)", "b u = [Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u) assert m ==", "{'c': {'arg_name': 'c', 'type': C}} def __init__(self, a, b, c): self.a = a", "self.b = b class D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg", "assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def", "D([A(1, 2)], [B(3, 4)], C(5, 6)) m = marshal_csv(d) u = unmarshal_csv(m, D)", "= a self.b = b class D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type':", "== 
[[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header", "= D([A(1, 2)], [B(3, 4)], C(5, 6)) m = marshal_csv(d) u = unmarshal_csv(m,", "def __init__(self, a, b): self.a = a self.b = b class D: _unmarshal_csv_map", "object}, } def __init__(self, a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A)", "'a': {'arg_name': 'a', 'type': object}, } def __init__(self, a): self.a = a with", "= {'c': {'arg_name': 'c', 'type': C}} def __init__(self, a, b, c): self.a =", "2)], [B(3, 4)], C(5, 6)) m = marshal_csv(d) u = unmarshal_csv(m, D) assert", "assert m[0] == ['a', 1], m assert m[1] == ['b', 2], m u", "import pytest def test_marshal_unmarshal_list(): class Test: def __init__(self, a, b): self.a = a", "self.b = b self.c = c def __iter__(self): for x in self.a: yield", "self.b = b class B: def __init__(self, a, b): self.a = a self.b", "== 2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('') is None assert", "= { 'a': {'arg_name': 'a', 'type': object}, } def __init__(self, a): self.a =", "with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a': {'arg_name':", "a = A(1, 2) m = marshal_csv(a) assert m[0] == ['a', 1], m", "__init__(self, a, b): self.a = a self.b = b class B: def __init__(self,", "True def __init__(self, a, b): self.a = a self.b = b a =", "3)] m = marshal_csv(u) assert m == [[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]],", "u.a assert u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('')", "a, b, c): self.a = a self.b = b self.c = c def", "= {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons", "2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('') is None assert func('23')", "B} 
_unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}} def __init__(self, a, b, c):", "Test: _marshal_list_row_header = \"abc\" def __init__(self, a, b): self.a = a self.b =", "assert m[1] == ['b', 2], m u = unmarshal_csv(m, A) assert u.a ==", "assert m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a == \"a\",", "class Test: def __init__(self, a, b): self.a = a self.b = b u", "pymarshal.csv import * import pytest def test_marshal_unmarshal_list(): class Test: def __init__(self, a, b):", "__init__(self, a, b): self.a = a self.b = b a = A(1, 2)", "self.a = a self.b = b class B: def __init__(self, a, b): self.a", "= marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a", "'a', 'type': object}, } def __init__(self, a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1,", "[\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def __init__(self,", "a, b): self.a = a self.b = b class B: def __init__(self, a,", "'c', 'type': C}} def __init__(self, a, b, c): self.a = a self.b =", "a, b): self.a = a self.b = b class D: _unmarshal_csv_map = {'a':", "\"a\", 2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\"", "\"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def __init__(self, a, b):", "= a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict =", "__init__(self, a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class", "[B(3, 4)], C(5, 6)) m = marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a", "b, c): self.a = a self.b = b self.c = c def __iter__(self):", "marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a ==", "{'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}} 
def __init__(self,", "in self.a: yield x for x in self.b: yield x yield self.c d", "_unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type': object}, } def __init__(self, a): self.a", "self.c = c def __iter__(self): for x in self.a: yield x for x", "b class B: def __init__(self, a, b): self.a = a self.b = b", "= [Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a m = marshal_csv(u)", "a self.b = b a = A(1, 2) m = marshal_csv(a) assert m[0]", "* import pytest def test_marshal_unmarshal_list(): class Test: def __init__(self, a, b): self.a =", "marshal_csv(u) assert m == [[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv():", "pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a':", "\"abc\" def __init__(self, a, b): self.a = a self.b = b u =", "u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def __init__(self, a, b): self.a", "def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A:", "A: _marshal_csv_dict = True def __init__(self, a, b): self.a = a self.b =", "= marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error():", "1], m assert m[1] == ['b', 2], m u = unmarshal_csv(m, A) assert", "== ['b', 2], m u = unmarshal_csv(m, A) assert u.a == 1, u.a", "'type': C}} def __init__(self, a, b, c): self.a = a self.b = b", "Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a m = marshal_csv(u) assert m[0][0] ==", "def __init__(self, a, b, c): self.a = a self.b = b self.c =", "assert u[0].a == \"a\", u[0].a m = marshal_csv(u) assert m[0][0] == \"a\", m[0][0]", "A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = {", "self.b = b class C: _marshal_list_row_header = 
\"c\" def __init__(self, a, b): self.a", "u = unmarshal_csv(m, A) assert u.a == 1, u.a assert u.b == 2,", "m assert m[1] == ['b', 2], m u = unmarshal_csv(m, A) assert u.a", "x in self.a: yield x for x in self.b: yield x yield self.c", "a, b): self.a = a self.b = b class C: _marshal_list_row_header = \"c\"", "self.a = a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] m", "yield x for x in self.b: yield x yield self.c d = D([A(1,", "d = D([A(1, 2)], [B(3, 4)], C(5, 6)) m = marshal_csv(d) u =", "b class C: _marshal_list_row_header = \"c\" def __init__(self, a, b): self.a = a", "D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type':", "def __iter__(self): for x in self.a: yield x for x in self.b: yield", "class A: _marshal_csv_dict = True def __init__(self, a, b): self.a = a self.b", "def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('') is None assert func('23') == 23", "= b class C: _marshal_list_row_header = \"c\" def __init__(self, a, b): self.a =", "u[0].a m = marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m, Test)", "with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict = True def", "yield self.c d = D([A(1, 2)], [B(3, 4)], C(5, 6)) m = marshal_csv(d)", "self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a == \"a\",", "\"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header():", "= marshal_csv(u) assert m == [[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m def", "{'arg_name': 'c', 'type': C}} def __init__(self, a, b, c): self.a = a self.b", "def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def __init__(self, a, b): self.a =", "a self.b = b class B: def __init__(self, a, b): self.a = a", "m = marshal_csv(u) assert m == [[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m", "D) assert u.a[0].a 
== 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError):", "A) def test_marshal_csv_dict(): class A: _marshal_csv_dict = True def __init__(self, a, b): self.a", "2) m = marshal_csv(a) assert m[0] == ['a', 1], m assert m[1] ==", "a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u)", "= b u = [Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u) assert m", "= b a = A(1, 2) m = marshal_csv(a) assert m[0] == ['a',", "a self.b = b class C: _marshal_list_row_header = \"c\" def __init__(self, a, b):", "a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A:", "unmarshal_csv(m, A) assert u.a == 1, u.a assert u.b == 2, u.b def", "assert m == [[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class", "= unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass", "['a', 1], m assert m[1] == ['b', 2], m u = unmarshal_csv(m, A)", "class D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b',", "m u = unmarshal_csv(m, A) assert u.a == 1, u.a assert u.b ==", "{'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons =", "__init__(self, a, b): self.a = a self.b = b class D: _unmarshal_csv_map =", "unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict = True def __init__(self, a,", "test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def __init__(self, a, b): self.a = a", "a self.b = b class D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}}", "unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header =", "b self.c = c def __iter__(self): for x in self.a: yield x for", "from pymarshal.csv import * import pytest def test_marshal_unmarshal_list(): class 
Test: def __init__(self, a,", "a self.b = b self.c = c def __iter__(self): for x in self.a:", "Test(\"b\", 3)] m = marshal_csv(u) assert m == [[\"abc\", \"a\", 2], [\"abc\", \"b\",", "c): self.a = a self.b = b self.c = c def __iter__(self): for", "test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map", "test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type': object}, } def", "class Test: _marshal_list_row_header = \"abc\" def __init__(self, a, b): self.a = a self.b", "= True def __init__(self, a, b): self.a = a self.b = b a", "m[0] == ['a', 1], m assert m[1] == ['b', 2], m u =", "= b class D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg =", "A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type': object},", "u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('') is None", "_unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}}", "test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def __init__(self, a, b): self.a = a", "u = unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A:", "3)] assert u[0].a == \"a\", u[0].a m = marshal_csv(u) assert m[0][0] == \"a\",", "__init__(self, a, b): self.a = a self.b = b u = [Test(\"a\", 2),", "C(5, 6)) m = marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a == 1,", "{'arg_name': 'a', 'type': object}, } def __init__(self, a): self.a = a with pytest.raises(ValueError):", "class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map =", "def __init__(self, a, b): self.a = a self.b 
= b a = A(1,", "[Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u) assert m == [[\"abc\", \"a\", 2],", "self.a = a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] assert", "u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A)", "def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type': object}, }", "6)) m = marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0]", "m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class", "b): self.a = a self.b = b class C: _marshal_list_row_header = \"c\" def", "import * import pytest def test_marshal_unmarshal_list(): class Test: def __init__(self, a, b): self.a", "x in self.b: yield x yield self.c d = D([A(1, 2)], [B(3, 4)],", "__init__(self, a, b, c): self.a = a self.b = b self.c = c", "3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def __init__(self, a, b):", "u[0].a == \"a\", u[0].a m = marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u", "= b u = [Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a", "\"a\", u[0].a m = marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m,", "} def __init__(self, a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def", "['b', 2], m u = unmarshal_csv(m, A) assert u.a == 1, u.a assert", "[[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header =", "pytest def test_marshal_unmarshal_list(): class Test: def __init__(self, a, b): self.a = a self.b", "A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type':", "2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict = True def __init__(self, a, b):", "4)], C(5, 6)) m = marshal_csv(d) u = unmarshal_csv(m, D) assert u.a[0].a ==", "'type': 
object}, } def __init__(self, a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]],", "== \"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a def", "in self.b: yield x yield self.c d = D([A(1, 2)], [B(3, 4)], C(5,", "b u = [Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a m", "_unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}} def __init__(self, a, b, c): self.a", "_marshal_list_row_header = \"a\" def __init__(self, a, b): self.a = a self.b = b", "\"b\", 3]], m def test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def __init__(self, a,", "'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name':", "u.a == 1, u.a assert u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func =", "b): self.a = a self.b = b class D: _unmarshal_csv_map = {'a': {'arg_name':", "def __init__(self, a): self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict():", "self.b = b a = A(1, 2) m = marshal_csv(a) assert m[0] ==", "2), Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a m = marshal_csv(u) assert m[0][0]", "_unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B}", "== \"a\", u[0].a m = marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u =", "a, b): self.a = a self.b = b a = A(1, 2) m", "a, b): self.a = a self.b = b u = [Test(\"a\", 2), Test(\"b\",", "= \"c\" def __init__(self, a, b): self.a = a self.b = b class", "\"a\" def __init__(self, a, b): self.a = a self.b = b class B:", "B: def __init__(self, a, b): self.a = a self.b = b class C:", "self.a: yield x for x in self.b: yield x yield self.c d =", "2], m u = unmarshal_csv(m, A) assert u.a == 1, u.a assert u.b", "b): self.a = a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)]", "self.a = a self.b = b self.c = c def __iter__(self): for x", "self.b: yield x yield self.c d = D([A(1, 2)], [B(3, 4)], 
C(5, 6))", "= {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}} def", "= c def __iter__(self): for x in self.a: yield x for x in", "test_marshal_csv_dict(): class A: _marshal_csv_dict = True def __init__(self, a, b): self.a = a", "marshal_csv(a) assert m[0] == ['a', 1], m assert m[1] == ['b', 2], m", "u = [Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a m =", "A) assert u.a == 1, u.a assert u.b == 2, u.b def test_csv_cast_empty_str_to_none():", "assert u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert func('') is", "self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] m = marshal_csv(u) assert", "unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type':", "\"c\" def __init__(self, a, b): self.a = a self.b = b class D:", "for x in self.a: yield x for x in self.b: yield x yield", "{'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c':", "for x in self.b: yield x yield self.c d = D([A(1, 2)], [B(3,", "def test_marshal_csv_dict(): class A: _marshal_csv_dict = True def __init__(self, a, b): self.a =", "def test_marshal_unmarshal_list(): class Test: def __init__(self, a, b): self.a = a self.b =", "= a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] m =", "self.a = a self.b = b class C: _marshal_list_row_header = \"c\" def __init__(self,", "'type': A}} _unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c',", "C}} def __init__(self, a, b, c): self.a = a self.b = b self.c", "1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error():", "u = unmarshal_csv_list(m, Test) assert u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class Test:", "def 
test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def __init__(self, a, b): self.a =", "{ 'a': {'arg_name': 'a', 'type': object}, } def __init__(self, a): self.a = a", "x for x in self.b: yield x yield self.c d = D([A(1, 2)],", "A(1, 2) m = marshal_csv(a) assert m[0] == ['a', 1], m assert m[1]", "m = marshal_csv(u) assert m[0][0] == \"a\", m[0][0] u = unmarshal_csv_list(m, Test) assert", "m == [[\"abc\", \"a\", 2], [\"abc\", \"b\", 3]], m def test_unmarshal_csv(): class A:", "assert u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with pytest.raises(AttributeError): unmarshal_csv([],", "m def test_unmarshal_csv(): class A: _marshal_list_row_header = \"a\" def __init__(self, a, b): self.a", "'type': B} _unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}} def __init__(self, a, b,", "= \"a\" def __init__(self, a, b): self.a = a self.b = b class", "b class D: _unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}} _unmarshal_csv_default_arg = {'arg_name':", "x yield self.c d = D([A(1, 2)], [B(3, 4)], C(5, 6)) m =", "= a self.b = b class B: def __init__(self, a, b): self.a =", "pytest.raises(AttributeError): unmarshal_csv([], A) def test_unmarshal_csv_raises_value_error(): class A: _unmarshal_csv_map = { 'a': {'arg_name': 'a',", "= b class B: def __init__(self, a, b): self.a = a self.b =", "unmarshal_csv(m, D) assert u.a[0].a == 1, u.a[0] def test_unmarshal_csv_raises_attribute_error(): class A: pass with", "a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a ==", "pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict = True def __init__(self,", "def __init__(self, a, b): self.a = a self.b = b class B: def", "C: _marshal_list_row_header = \"c\" def __init__(self, a, b): self.a = a self.b =", "class C: _marshal_list_row_header = \"c\" def __init__(self, a, b): self.a = a self.b", "= a self.b = b u = [Test(\"a\", 2), Test(\"b\", 3)] assert 
u[0].a", "class A: _unmarshal_csv_map = { 'a': {'arg_name': 'a', 'type': object}, } def __init__(self,", "1, u.a assert u.b == 2, u.b def test_csv_cast_empty_str_to_none(): func = csv_cast_empty_str_to_none(int) assert", "[Test(\"a\", 2), Test(\"b\", 3)] assert u[0].a == \"a\", u[0].a m = marshal_csv(u) assert", "def __init__(self, a, b): self.a = a self.b = b u = [Test(\"a\",", "u[0].a == \"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def __init__(self,", "2), Test(\"b\", 3)] m = marshal_csv(u) assert m == [[\"abc\", \"a\", 2], [\"abc\",", "self.c d = D([A(1, 2)], [B(3, 4)], C(5, 6)) m = marshal_csv(d) u", "= a self.b = b a = A(1, 2) m = marshal_csv(a) assert", "= marshal_csv(a) assert m[0] == ['a', 1], m assert m[1] == ['b', 2],", "self.a = a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict", "self.a = a self.b = b class D: _unmarshal_csv_map = {'a': {'arg_name': 'a',", "b): self.a = a self.b = b class B: def __init__(self, a, b):", "== \"a\", u[0].a def test__marshal_list_row_header(): class Test: _marshal_list_row_header = \"abc\" def __init__(self, a,", "a with pytest.raises(ValueError): unmarshal_csv([[1, 2]], A) def test_marshal_csv_dict(): class A: _marshal_csv_dict = True" ]
[ "wants to get an item. If it returns false, the item will not", "equip what. Args: entity (Entity): The item being equipped. gearset (gearset): The gearset", "destination inventory. Returns: get (bool): Whether to get it or not. \"\"\" return", "The Aspect system is SUPPOSED to handle things like Character Classes, Final Fantasy", "return self.name def at_before_equip(self, entity, gearset, slot): \"\"\" This is called whenever the", "is SUPPOSED to handle things like Character Classes, Final Fantasy Jobs, Professions, etc.", "what. Args: entity (Entity): The item being nabbed. inventory (Inventory): The proposed destination", "carry what. Args: entity (Entity): The item being nabbed. inventory (Inventory): The proposed", "at_before_get(self, entity, inventory): \"\"\" This is called whenever the owner wants to get", "to handle things like Character Classes, Final Fantasy Jobs, Professions, etc. Also Species/Races.", "returns false, the item will not be obtained. Override this hook to implement", "Final Fantasy Jobs, Professions, etc. Also Species/Races. We'll see how well that works", "__init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler = handler self.slot = slot", "def __str__(self): return self.name def at_before_equip(self, entity, gearset, slot): \"\"\" This is called", "the owner wants to get an item. If it returns false, the item", "\"\"\" This is called whenever the owner wants to equip an item. If", "false, the item will not be equipped. Override this hook to implement Aspect-specified", "This is called whenever the owner wants to get an item. If it", "to equip an item. If it returns false, the item will not be", "called whenever the owner wants to get an item. If it returns false,", "self.data = in_data def __str__(self): return self.name def at_before_equip(self, entity, gearset, slot): \"\"\"", "the item will not be equipped. 
Override this hook to implement Aspect-specified rules", "is called whenever the owner wants to equip an item. If it returns", "at_before_equip(self, entity, gearset, slot): \"\"\" This is called whenever the owner wants to", "if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict() self.data =", "wants to equip an item. If it returns false, the item will not", "hook to implement Aspect-specified rules about who can equip what. Args: entity (Entity):", "(Inventory): The proposed destination inventory. Returns: get (bool): Whether to get it or", "an item. If it returns false, the item will not be obtained. Override", "default=dict()) else: in_data = dict() self.data = in_data def __str__(self): return self.name def", "(bool): Whether to equip it or not. \"\"\" return True def at_before_get(self, entity,", "be obtained. Override this hook to implement Aspect-specified rules about who can carry", "self.name def at_before_equip(self, entity, gearset, slot): \"\"\" This is called whenever the owner", "see how well that works out. \"\"\" class Aspect(object): name = \"Unknown Aspect\"", "being nabbed. inventory (Inventory): The proposed destination inventory. Returns: get (bool): Whether to", "like Character Classes, Final Fantasy Jobs, Professions, etc. Also Species/Races. We'll see how", "handler self.slot = slot if in_data is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot,", "called whenever the owner wants to equip an item. If it returns false,", "item being equipped. gearset (gearset): The gearset being equipped to. slot (slot): The", "self.handler = handler self.slot = slot if in_data is None: if self.persistent: in_data", "item will not be obtained. Override this hook to implement Aspect-specified rules about", "in_data def __str__(self): return self.name def at_before_equip(self, entity, gearset, slot): \"\"\" This is", "slot being equipped to. 
Returns: equip (bool): Whether to equip it or not.", "about who can carry what. Args: entity (Entity): The item being nabbed. inventory", "equipped to. Returns: equip (bool): Whether to equip it or not. \"\"\" return", "rules about who can equip what. Args: entity (Entity): The item being equipped.", "(slot): The Gearset slot being equipped to. Returns: equip (bool): Whether to equip", "Professions, etc. Also Species/Races. We'll see how well that works out. \"\"\" class", "etc. Also Species/Races. We'll see how well that works out. \"\"\" class Aspect(object):", "Override this hook to implement Aspect-specified rules about who can equip what. Args:", "equipped to. slot (slot): The Gearset slot being equipped to. Returns: equip (bool):", "being equipped to. slot (slot): The Gearset slot being equipped to. Returns: equip", "whenever the owner wants to get an item. If it returns false, the", "how well that works out. \"\"\" class Aspect(object): name = \"Unknown Aspect\" def", "well that works out. \"\"\" class Aspect(object): name = \"Unknown Aspect\" def __init__(self,", "equip an item. If it returns false, the item will not be equipped.", "= handler.owner.persistent self.handler = handler self.slot = slot if in_data is None: if", "equip it or not. \"\"\" return True def at_before_get(self, entity, inventory): \"\"\" This", "owner wants to equip an item. If it returns false, the item will", "works out. \"\"\" class Aspect(object): name = \"Unknown Aspect\" def __init__(self, handler, slot,", "to implement Aspect-specified rules about who can equip what. Args: entity (Entity): The", "category='aspect', default=dict()) else: in_data = dict() self.data = in_data def __str__(self): return self.name", "nabbed. inventory (Inventory): The proposed destination inventory. Returns: get (bool): Whether to get", "entity (Entity): The item being nabbed. inventory (Inventory): The proposed destination inventory. Returns:", "proposed destination inventory. 
Returns: get (bool): Whether to get it or not. \"\"\"", "inventory): \"\"\" This is called whenever the owner wants to get an item.", "= \"Unknown Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler =", "in_data is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data =", "system is SUPPOSED to handle things like Character Classes, Final Fantasy Jobs, Professions,", "handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler = handler self.slot = slot if", "being equipped. gearset (gearset): The gearset being equipped to. slot (slot): The Gearset", "self.slot = slot if in_data is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect',", "equipped. gearset (gearset): The gearset being equipped to. slot (slot): The Gearset slot", "that works out. \"\"\" class Aspect(object): name = \"Unknown Aspect\" def __init__(self, handler,", "entity (Entity): The item being equipped. gearset (gearset): The gearset being equipped to.", "equipped. Override this hook to implement Aspect-specified rules about who can equip what.", "to get an item. If it returns false, the item will not be", "If it returns false, the item will not be equipped. Override this hook", "(Entity): The item being nabbed. inventory (Inventory): The proposed destination inventory. Returns: get", "equip (bool): Whether to equip it or not. \"\"\" return True def at_before_get(self,", "The gearset being equipped to. slot (slot): The Gearset slot being equipped to.", "will not be equipped. Override this hook to implement Aspect-specified rules about who", "whenever the owner wants to equip an item. If it returns false, the", "entity, inventory): \"\"\" This is called whenever the owner wants to get an", "item. If it returns false, the item will not be equipped. 
Override this", "self.persistent = handler.owner.persistent self.handler = handler self.slot = slot if in_data is None:", "None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict() self.data", "or not. \"\"\" return True def at_before_get(self, entity, inventory): \"\"\" This is called", "inventory (Inventory): The proposed destination inventory. Returns: get (bool): Whether to get it", "is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict()", "an item. If it returns false, the item will not be equipped. Override", "class Aspect(object): name = \"Unknown Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent =", "to equip it or not. \"\"\" return True def at_before_get(self, entity, inventory): \"\"\"", "be equipped. Override this hook to implement Aspect-specified rules about who can equip", "Whether to equip it or not. \"\"\" return True def at_before_get(self, entity, inventory):", "The proposed destination inventory. Returns: get (bool): Whether to get it or not.", "gearset (gearset): The gearset being equipped to. slot (slot): The Gearset slot being", "Aspect-specified rules about who can equip what. Args: entity (Entity): The item being", "(Entity): The item being equipped. gearset (gearset): The gearset being equipped to. slot", "Classes, Final Fantasy Jobs, Professions, etc. Also Species/Races. We'll see how well that", "Gearset slot being equipped to. Returns: equip (bool): Whether to equip it or", "else: in_data = dict() self.data = in_data def __str__(self): return self.name def at_before_equip(self,", "= self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict() self.data = in_data def __str__(self):", "the owner wants to equip an item. If it returns false, the item", "rules about who can carry what. 
Args: entity (Entity): The item being nabbed.", "The Gearset slot being equipped to. Returns: equip (bool): Whether to equip it", "entity, gearset, slot): \"\"\" This is called whenever the owner wants to equip", "\"\"\" class Aspect(object): name = \"Unknown Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent", "Aspect(object): name = \"Unknown Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent", "SUPPOSED to handle things like Character Classes, Final Fantasy Jobs, Professions, etc. Also", "in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict() self.data = in_data def", "out. \"\"\" class Aspect(object): name = \"Unknown Aspect\" def __init__(self, handler, slot, in_data=None):", "to. Returns: equip (bool): Whether to equip it or not. \"\"\" return True", "the item will not be obtained. Override this hook to implement Aspect-specified rules", "item being nabbed. inventory (Inventory): The proposed destination inventory. Returns: get (bool): Whether", "\"Unknown Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler = handler", "inventory. Returns: get (bool): Whether to get it or not. \"\"\" return True", "gearset being equipped to. slot (slot): The Gearset slot being equipped to. Returns:", "\"\"\" return True def at_before_get(self, entity, inventory): \"\"\" This is called whenever the", "name = \"Unknown Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler", "slot (slot): The Gearset slot being equipped to. Returns: equip (bool): Whether to", "false, the item will not be obtained. Override this hook to implement Aspect-specified", "True def at_before_get(self, entity, inventory): \"\"\" This is called whenever the owner wants", "implement Aspect-specified rules about who can carry what. 
Args: entity (Entity): The item", "We'll see how well that works out. \"\"\" class Aspect(object): name = \"Unknown", "to. slot (slot): The Gearset slot being equipped to. Returns: equip (bool): Whether", "in_data = dict() self.data = in_data def __str__(self): return self.name def at_before_equip(self, entity,", "can equip what. Args: entity (Entity): The item being equipped. gearset (gearset): The", "Args: entity (Entity): The item being nabbed. inventory (Inventory): The proposed destination inventory.", "who can equip what. Args: entity (Entity): The item being equipped. gearset (gearset):", "def at_before_equip(self, entity, gearset, slot): \"\"\" This is called whenever the owner wants", "not be obtained. Override this hook to implement Aspect-specified rules about who can", "= in_data def __str__(self): return self.name def at_before_equip(self, entity, gearset, slot): \"\"\" This", "Aspect\" def __init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler = handler self.slot", "Fantasy Jobs, Professions, etc. Also Species/Races. We'll see how well that works out.", "Jobs, Professions, etc. Also Species/Races. We'll see how well that works out. \"\"\"", "implement Aspect-specified rules about who can equip what. Args: entity (Entity): The item", "who can carry what. Args: entity (Entity): The item being nabbed. 
inventory (Inventory):", "self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict() self.data = in_data def __str__(self): return", "slot if in_data is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else:", "def __init__(self, handler, slot, in_data=None): self.persistent = handler.owner.persistent self.handler = handler self.slot =", "slot): \"\"\" This is called whenever the owner wants to equip an item.", "return True def at_before_get(self, entity, inventory): \"\"\" This is called whenever the owner", "get an item. If it returns false, the item will not be obtained.", "item. If it returns false, the item will not be obtained. Override this", "slot, in_data=None): self.persistent = handler.owner.persistent self.handler = handler self.slot = slot if in_data", "being equipped to. Returns: equip (bool): Whether to equip it or not. \"\"\"", "what. Args: entity (Entity): The item being equipped. gearset (gearset): The gearset being", "The item being nabbed. inventory (Inventory): The proposed destination inventory. Returns: get (bool):", "things like Character Classes, Final Fantasy Jobs, Professions, etc. Also Species/Races. We'll see", "dict() self.data = in_data def __str__(self): return self.name def at_before_equip(self, entity, gearset, slot):", "it returns false, the item will not be obtained. Override this hook to", "Species/Races. We'll see how well that works out. \"\"\" class Aspect(object): name =", "def at_before_get(self, entity, inventory): \"\"\" This is called whenever the owner wants to", "Character Classes, Final Fantasy Jobs, Professions, etc. Also Species/Races. We'll see how well", "Aspect system is SUPPOSED to handle things like Character Classes, Final Fantasy Jobs,", "not. \"\"\" return True def at_before_get(self, entity, inventory): \"\"\" This is called whenever", "(gearset): The gearset being equipped to. 
slot (slot): The Gearset slot being equipped", "\"\"\" The Aspect system is SUPPOSED to handle things like Character Classes, Final", "Returns: equip (bool): Whether to equip it or not. \"\"\" return True def", "it returns false, the item will not be equipped. Override this hook to", "item will not be equipped. Override this hook to implement Aspect-specified rules about", "it or not. \"\"\" return True def at_before_get(self, entity, inventory): \"\"\" This is", "obtained. Override this hook to implement Aspect-specified rules about who can carry what.", "= dict() self.data = in_data def __str__(self): return self.name def at_before_equip(self, entity, gearset,", "owner wants to get an item. If it returns false, the item will", "self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data = dict() self.data = in_data", "This is called whenever the owner wants to equip an item. If it", "= handler self.slot = slot if in_data is None: if self.persistent: in_data =", "about who can equip what. Args: entity (Entity): The item being equipped. gearset", "Override this hook to implement Aspect-specified rules about who can carry what. Args:", "If it returns false, the item will not be obtained. Override this hook", "can carry what. Args: entity (Entity): The item being nabbed. inventory (Inventory): The", "Also Species/Races. We'll see how well that works out. \"\"\" class Aspect(object): name", "if in_data is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict()) else: in_data", "gearset, slot): \"\"\" This is called whenever the owner wants to equip an", "Args: entity (Entity): The item being equipped. gearset (gearset): The gearset being equipped", "this hook to implement Aspect-specified rules about who can carry what. Args: entity", "Aspect-specified rules about who can carry what. 
Args: entity (Entity): The item being", "hook to implement Aspect-specified rules about who can carry what. Args: entity (Entity):", "is called whenever the owner wants to get an item. If it returns", "in_data=None): self.persistent = handler.owner.persistent self.handler = handler self.slot = slot if in_data is", "returns false, the item will not be equipped. Override this hook to implement", "\"\"\" This is called whenever the owner wants to get an item. If", "handle things like Character Classes, Final Fantasy Jobs, Professions, etc. Also Species/Races. We'll", "__str__(self): return self.name def at_before_equip(self, entity, gearset, slot): \"\"\" This is called whenever", "<filename>athanor_entity/entities/aspects.py \"\"\" The Aspect system is SUPPOSED to handle things like Character Classes,", "will not be obtained. Override this hook to implement Aspect-specified rules about who", "to implement Aspect-specified rules about who can carry what. Args: entity (Entity): The", "handler.owner.persistent self.handler = handler self.slot = slot if in_data is None: if self.persistent:", "The item being equipped. gearset (gearset): The gearset being equipped to. slot (slot):", "this hook to implement Aspect-specified rules about who can equip what. Args: entity", "= slot if in_data is None: if self.persistent: in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict())", "not be equipped. Override this hook to implement Aspect-specified rules about who can" ]
[ "ClassificationModel @dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\" model: Any = MISSING #", "typing import Any @dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any =", "@dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\" model: Any = MISSING # ClassificationModel", "import Any @dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING", "Any = MISSING # ClassificationModel @dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\" model:", "class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel @dataclass", "dataclass, field from omegaconf import MISSING from typing import Any @dataclass class LinearProbeConf:", "_target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel @dataclass class BitFitConf:", "# ClassificationModel @dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\" model: Any = MISSING", "import dataclass, field from omegaconf import MISSING from typing import Any @dataclass class", "LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel @dataclass class", "import MISSING from typing import Any @dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\"", "\"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel @dataclass class BitFitConf: _target_: str =", "@dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel", "field from omegaconf import MISSING from typing import Any @dataclass class LinearProbeConf: _target_:", "omegaconf import MISSING from typing import Any @dataclass class LinearProbeConf: _target_: str =", "Any 
@dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING #", "MISSING # ClassificationModel @dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\" model: Any =", "str = \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel @dataclass class BitFitConf: _target_:", "from typing import Any @dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model: Any", "= \"landscapes.models.meta.ft.LinearProbe\" model: Any = MISSING # ClassificationModel @dataclass class BitFitConf: _target_: str", "from dataclasses import dataclass, field from omegaconf import MISSING from typing import Any", "MISSING from typing import Any @dataclass class LinearProbeConf: _target_: str = \"landscapes.models.meta.ft.LinearProbe\" model:", "from omegaconf import MISSING from typing import Any @dataclass class LinearProbeConf: _target_: str", "model: Any = MISSING # ClassificationModel @dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\"", "= MISSING # ClassificationModel @dataclass class BitFitConf: _target_: str = \"landscapes.models.meta.ft.BitFit\" model: Any", "dataclasses import dataclass, field from omegaconf import MISSING from typing import Any @dataclass" ]
[ "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "The ASF licenses this file to You under the Apache License, Version 2.0", "log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file", "for logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for logger.", "to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if", "{0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file", "hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. load_to_hadoop_script", "def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There", "logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter", "the NOTICE file distributed with # this work for additional information regarding copyright", "file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci", "subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. 
load_to_hadoop_script = \"hadoop fs", "-moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False):", "False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance", "dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in dirs else False return", "file to hdfs. load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file", "ANY KIND, either express or implied. # See the License for the specific", "watcher\") event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger):", "= new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils:", "logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add it to the handlers formatter =", "#!/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one", "if parameter == None or parameter == \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger):", "import logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class Util(object): @classmethod", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "work for additional information regarding copyright ownership. 
# The ASF licenses this file", "= None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path)", "parameter == None or parameter == \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop", "sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None or parameter == \"\": logger.error(message)", "subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter", "handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating", "OF ANY KIND, either express or implied. # See the License for the", "Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing:", "reate console handler for logger. ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers", "ownership. # The ASF licenses this file to You under the Apache License,", "{0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd))", "regarding copyright ownership. 
# The ASF licenses this file to You under the", "Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try:", "return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as", "logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if create_file: # create file handler", "= logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch)", "create file handler for logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console", "error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True", "= \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except", "Observer from watchdog.events import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics", "copyright ownership. # The ASF licenses this file to You under the Apache", "-mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): #", "%(name)s - %(levelname)s - %(message)s') if create_file: # create file handler for logger.", "handler for logger. 
fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for", "it to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')", "# this work for additional information regarding copyright ownership. # The ASF licenses", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "= \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True)", "There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1]", "handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if create_file: #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "validate_parameter(cls,parameter,message,logger): if parameter == None or parameter == \"\": logger.error(message) sys.exit(1) @classmethod def", "permissions and # limitations under the License. # import os import sys import", "the License. # import os import sys import subprocess import logging from watchdog.observers", "new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing:", "pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if not event.is_directory:", "handler for logger. 
ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger.", "None or parameter == \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "def validate_parameter(cls,parameter,message,logger): if parameter == None or parameter == \"\": logger.error(message) sys.exit(1) @classmethod", "License, Version 2.0 # (the \"License\"); you may not use this file except", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic))", "load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script))", "e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if", "{1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was", "required by applicable law or agreed to in writing, software # distributed under", "logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs", "this file except in compliance with # the License. 
You may obtain a", "e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs", "applicable law or agreed to in writing, software # distributed under the License", "create logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add", "log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file observer", "or agreed to in writing, software # distributed under the License is distributed", "__init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path) def on_created(self,event): if", "{0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None or parameter == \"\":", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if", "ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger. if create_file: log.addHandler(fh)", "create_file: # create file handler for logger. 
fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) #", "== None or parameter == \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs", "or parameter == \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p", "def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing:", "create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer", "writing, software # distributed under the License is distributed on an \"AS IS\"", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "under the License. # import os import sys import subprocess import logging from", "to hdfs. load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to", "observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError", "compliance with # the License. You may obtain a copy of the License", "NOTICE file distributed with # this work for additional information regarding copyright ownership.", "distributed with # this work for additional information regarding copyright ownership. 
# The", "as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type):", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "specific language governing permissions and # limitations under the License. # import os", "License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "agreements. See the NOTICE file distributed with # this work for additional information", "ASF licenses this file to You under the Apache License, Version 2.0 #", "return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def", "log.setLevel(level=logging.INFO) # create formatter and add it to the handlers formatter = logging.Formatter('%(asctime)s", "There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter ==", "an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok =", "= logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if create_file: # create file", "def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create", "executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None or parameter ==", "fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger):", "2.0 # (the 
\"License\"); you may not use this file except in compliance", "= pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path) def on_created(self,event): if not event.is_directory:", "to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector", "logger. ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger. if create_file:", "\"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod", "for logger. ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger. if", "Apache License, Version 2.0 # (the \"License\"); you may not use this file", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "= Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True)", "fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def", "logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs =", "limitations under the License. 
# import os import sys import subprocess import logging", "@classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in dirs", "subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1)", "in compliance with # the License. You may obtain a copy of the", "sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in", "logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add it", "agreed to in writing, software # distributed under the License is distributed on", "observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e:", "@classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils:", "Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE", "file to You under the Apache License, Version 2.0 # (the \"License\"); you", "logger.info(\"Creating collector watcher\") event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod", "@classmethod def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) #", "import sys import subprocess import logging from watchdog.observers import Observer from watchdog.events import", "return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file observer =", "collector watcher\") event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def", "add it to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -", "# Unless required by applicable law or agreed to in writing, software #", "may not use this file except in compliance with # the License. 
You", "by applicable law or agreed to in writing, software # distributed under the", "--zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e:", "{0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils:", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger):", "def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True)", "load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path)", "hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log =", "observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command))", "License. 
# import os import sys import subprocess import logging from watchdog.observers import", "is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event):", "an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None or", "\"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating", "execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was", "# Licensed to the Apache Software Foundation (ASF) under one or more #", "== \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils:", "more # contributor license agreements. See the NOTICE file distributed with # this", "License for the specific language governing permissions and # limitations under the License.", "and add it to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s", "Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See", "to in writing, software # distributed under the License is distributed on an", "watchdog.events import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper", "Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except", "to You under the Apache License, Version 2.0 # (the \"License\"); you may", "implied. # See the License for the specific language governing permissions and #", "creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "the Apache License, Version 2.0 # (the \"License\"); you may not use this", "if pipeline_type in dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None", "# # Licensed to the Apache Software Foundation (ASF) under one or more", "pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path) def on_created(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.src_path)", "# create formatter and add it to the handlers formatter = logging.Formatter('%(asctime)s -", "= True if pipeline_type in dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance", "\"License\"); you may not use this file except in compliance with # the", "or implied. 
# See the License for the specific language governing permissions and", "-p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "@classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder))", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "the Apache Software Foundation (ASF) under one or more # contributor license agreements.", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None or parameter", "file handler for logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler", "is_type_ok = True if pipeline_type in dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler):", "governing permissions and # limitations under the License. 
# import os import sys", "@classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path)", "sys import subprocess import logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler", "and # limitations under the License. # import os import sys import subprocess", "except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod", "as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger):", "pipeline_type in dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def", "fh.setFormatter(formatter) # reate console handler for logger. ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) #", "in dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance):", "validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in dirs else False", "you may not use this file except in compliance with # the License.", "@classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None or parameter == \"\": logger.error(message) sys.exit(1)", "use this file except in compliance with # the License. You may obtain", "Software Foundation (ASF) under one or more # contributor license agreements. 
See the", "sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder:", "fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for logger. ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter)", "fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for logger. ch =", "ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return log", "from watchdog.events import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete", "logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger", "def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path) def on_created(self,event):", "for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add it to", "Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger for", "language governing permissions and # limitations under the License. # import os import", "@classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. 
load_to_hadoop_script = \"hadoop fs -moveFromLocal", "else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance =", "= os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in dirs else False return is_type_ok", "formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if create_file: # create", "for the specific language governing permissions and # limitations under the License. #", "import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0}", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "# (the \"License\"); you may not use this file except in compliance with", "None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path) def", "folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. load_to_hadoop_script =", "# # Unless required by applicable law or agreed to in writing, software", "= logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for logger. ch = logging.StreamHandler()", "python # # Licensed to the Apache Software Foundation (ASF) under one or", "express or implied. # See the License for the specific language governing permissions", "except in compliance with # the License. You may obtain a copy of", "file distributed with # this work for additional information regarding copyright ownership. #", "(ASF) under one or more # contributor license agreements. 
See the NOTICE file", "the License. You may obtain a copy of the License at # #", "{0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd))", "either express or implied. # See the License for the specific language governing", "or more # contributor license agreements. See the NOTICE file distributed with #", "import subprocess import logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class", "{1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create", "was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing:", "to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log", "# create file handler for logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate", "hdfs. 
load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs:", "try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an", "dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance", "\"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError", "license agreements. See the NOTICE file distributed with # this work for additional", "if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler", "logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error", "# create logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and", "licenses this file to You under the Apache License, Version 2.0 # (the", "subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def", "parameter == \"\": logger.error(message) sys.exit(1) @classmethod def creat_hdfs_folder(cls,hdfs_path,logger): hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path)", "under the Apache License, Version 2.0 # (the \"License\"); you may not use", "# reate console handler for logger. 
ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "Version 2.0 # (the \"License\"); you may not use this file except in", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "%(message)s') if create_file: # create file handler for logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG)", "os import sys import subprocess import logging from watchdog.observers import Observer from watchdog.events", "def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in dirs else", "if create_file: # create file handler for logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter)", "Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs.", "- %(name)s - %(levelname)s - %(message)s') if create_file: # create file handler for", "create formatter and add it to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s", "<gh_stars>1-10 #!/bin/env python # # Licensed to the Apache Software Foundation (ASF) under", "# contributor license agreements. See the NOTICE file distributed with # this work", "logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class Util(object): @classmethod def", "log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add it to the handlers", "# move file to hdfs. 
load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils:", "See the NOTICE file distributed with # this work for additional information regarding", "prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add it to the", "hadoop_create_folder=\"hadoop fs -mkdir -p {0}\".format(hdfs_path) logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def", "%(levelname)s - %(message)s') if create_file: # create file handler for logger. fh =", "create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler =", "with # the License. You may obtain a copy of the License at", "self.pipeline_instance = pipeline_instance def on_moved(self,event): if not event.is_directory: self.pipeline_instance.new_file_detected(event.dest_path) def on_created(self,event): if not", "add handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger):", "under one or more # contributor license agreements. See the NOTICE file distributed", "{0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log = logging.getLogger(logger_name)", "logger. fh = logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for logger. 
ch", "= logging.getLogger(logger_name) log.setLevel(level=logging.INFO) # create formatter and add it to the handlers formatter", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "# the License. You may obtain a copy of the License at #", "this file to You under the Apache License, Version 2.0 # (the \"License\");", "not use this file except in compliance with # the License. You may", "logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an error", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "--topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There", "You under the Apache License, Version 2.0 # (the \"License\"); you may not", "the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if create_file:", "try: logger.info(\"SPOT.Utils: Executing: {0}\".format(command)) subprocess.call(command,shell=True) except subprocess.CalledProcessError as e: logger.error(\"SPOT.Utils: There was an", "FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic", "one or more # contributor license agreements. 
See the NOTICE file distributed with", "--delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True) except subprocess.CalledProcessError as", "{0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading file to hdfs: {0}\".format(load_to_hadoop_script)) subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): #", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "move file to hdfs. load_to_hadoop_script = \"hadoop fs -moveFromLocal {0} {1}\".format(file_local_path,file_hdfs_path) logger.info(\"SPOT.Utils: Loading", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "Licensed to the Apache Software Foundation (ASF) under one or more # contributor", "watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic", "@classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils:", "See the License for the specific language governing permissions and # limitations under", "{0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. 
load_to_hadoop_script = \"hadoop", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic)", "log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file observer = Observer()", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# add handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return", "(the \"License\"); you may not use this file except in compliance with #", "subprocess.call(load_to_hadoop_script,shell=True) @classmethod def get_logger(cls,logger_name,create_file=False): # create logger for prd_ci log = logging.getLogger(logger_name) log.setLevel(level=logging.INFO)", "for additional information regarding copyright ownership. # The ASF licenses this file to", "formatter and add it to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s -", "class NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if", "subprocess import logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class Util(object):", "the specific language governing permissions and # limitations under the License. # import", "- %(message)s') if create_file: # create file handler for logger. fh = logging.FileHandler('SPOT.log')", "console handler for logger. 
ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG) ch.setFormatter(formatter) # add handlers to", "additional information regarding copyright ownership. # The ASF licenses this file to You", "contributor license agreements. See the NOTICE file distributed with # this work for", "# import os import sys import subprocess import logging from watchdog.observers import Observer", "import Observer from watchdog.events import FileSystemEventHandler class Util(object): @classmethod def remove_kafka_topic(cls,zk,topic,logger): rm_kafka_topic =", "def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to hdfs. load_to_hadoop_script = \"hadoop fs -moveFromLocal {0}", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "- %(levelname)s - %(message)s') if create_file: # create file handler for logger. fh", "logger.info(\"SPOT.Utils: Creating hdfs folder: {0}\".format(hadoop_create_folder)) subprocess.call(hadoop_create_folder,shell=True) @classmethod def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger): # move file to", "def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\") event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path) return", "logging.FileHandler('SPOT.log') fh.setLevel(level=logging.DEBUG) fh.setFormatter(formatter) # reate console handler for logger. ch = logging.StreamHandler() ch.setLevel(level=logging.DEBUG)", "this work for additional information regarding copyright ownership. # The ASF licenses this", "rm_kafka_topic = \"kafka-topics --delete --zookeeper {0} --topic {1}\".format(zk,topic) try: logger.info(\"SPOT.Utils: Executing: {0}\".format(rm_kafka_topic)) subprocess.call(rm_kafka_topic,shell=True)", "to the Apache Software Foundation (ASF) under one or more # contributor license", "logger. 
if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod def create_watcher(cls,collector_path,new_file,logger): logger.info(\"Creating collector watcher\")", "NewFileEvent(FileSystemEventHandler): pipeline_instance = None def __init__(self,pipeline_instance): self.pipeline_instance = pipeline_instance def on_moved(self,event): if not", "with # this work for additional information regarding copyright ownership. # The ASF", "file except in compliance with # the License. You may obtain a copy", "information regarding copyright ownership. # The ASF licenses this file to You under", "# limitations under the License. # import os import sys import subprocess import", "import os import sys import subprocess import logging from watchdog.observers import Observer from", "event_handler = new_file observer = Observer() observer.schedule(event_handler,collector_path) return observer @classmethod def execute_cmd(cls,command,logger): try:", "was an error executing: {0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_parameter(cls,parameter,message,logger): if parameter == None", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "{0}\".format(e.cmd)) sys.exit(1) @classmethod def validate_data_source(cls,pipeline_type): dirs = os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type", "os.walk(\"{0}/pipelines/\".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1] is_type_ok = True if pipeline_type in dirs else False return is_type_ok class", "True if pipeline_type in dirs else False return is_type_ok class NewFileEvent(FileSystemEventHandler): pipeline_instance =", "ch.setFormatter(formatter) # add handlers to logger. if create_file: log.addHandler(fh) log.addHandler(ch) return log @classmethod", "# The ASF licenses this file to You under the Apache License, Version" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "language governing permissions and # limitations under the License. # ============================================================================== from __future__", "of edge types p: back probality q: forward probality default_node: default fill nodes", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "Tensor edge_types: list of 1-d Tensor of edge types p: back probality q:", "len(edge_types), ', '.join(str(x) for x in uniq_nodes), ', '.join(str('e_' + x) for x", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "[nodes, edge_types, p, q, default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1))", ") result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1)) return result[0] def _nebula_random_walk(nodes, edge_types, p, q, default_node):", "edge_types = [type_ops.get_edge_type_id(edge_type) for edge_type in edge_types] return _random_walk(nodes, edge_types, p, q, default_node)", "= {}.fromkeys(nodes).keys() nql = 'USE {}; randomwalk {} from {} over {} where", "License. 
# You may obtain a copy of the License at # #", "path_nodes = map(lambda x: long(x if x != '-1' else default_node), path.split('#')) path_cache[path_nodes[0]]", "path = row.columns[0].get_str() path_nodes = map(lambda x: long(x if x != '-1' else", "'-1' else default_node), path.split('#')) path_cache[path_nodes[0]] = path_nodes for node in nodes: paths.append(path_cache[node]) return", "edge_types, p=1.0, q=1.0, default_node=-1): ''' Random walk from a list of nodes. Args:", "nodes ''' if base.nebula_ops['random_walk']: return nebula_random_walk(nodes, edge_types, p, q, default_node) edge_types = [type_ops.get_edge_type_id(edge_type)", "print_function import tensorflow as tf from tf_euler.python.euler_ops import base from tf_euler.python.euler_ops import type_ops", "default_node=-1): ''' Random walk from a list of nodes. Args: nodes: start node", "q: forward probality default_node: default fill nodes ''' if base.nebula_ops['random_walk']: return nebula_random_walk(nodes, edge_types,", "= [] uniq_nodes = {}.fromkeys(nodes).keys() nql = 'USE {}; randomwalk {} from {}", "Args: nodes: start node ids, 1-d Tensor edge_types: list of 1-d Tensor of", "p, q, default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1)) return result[0]", "[] uniq_nodes = {}.fromkeys(nodes).keys() nql = 'USE {}; randomwalk {} from {} over", "law or agreed to in writing, software # distributed under the License is", "in edge_types[0]), p, q ) path_cache = {} resp = base.nebula_client.execute_query(nql) if resp.rows", "the License for the specific language governing permissions and # limitations under the", "2020 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the", "+ x) for x in edge_types[0]), p, q ) path_cache = {} resp", "compliance with the License. 
# You may obtain a copy of the License", "'.join(str(x) for x in uniq_nodes), ', '.join(str('e_' + x) for x in edge_types[0]),", "p, q ) path_cache = {} resp = base.nebula_client.execute_query(nql) if resp.rows is not", "= {} resp = base.nebula_client.execute_query(nql) if resp.rows is not None: for row in", "import print_function import tensorflow as tf from tf_euler.python.euler_ops import base from tf_euler.python.euler_ops import", "_random_walk = base._LIB_OP.random_walk def random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1): ''' Random walk from", "_nebula_random_walk(nodes, edge_types, p, q, default_node): paths = [] uniq_nodes = {}.fromkeys(nodes).keys() nql =", "else default_node), path.split('#')) path_cache[path_nodes[0]] = path_nodes for node in nodes: paths.append(path_cache[node]) return np.asarray(paths,", "uniq_nodes = {}.fromkeys(nodes).keys() nql = 'USE {}; randomwalk {} from {} over {}", "return nebula_random_walk(nodes, edge_types, p, q, default_node) edge_types = [type_ops.get_edge_type_id(edge_type) for edge_type in edge_types]", "specific language governing permissions and # limitations under the License. # ============================================================================== from", "q=1.0, default_node=-1): result = tf.py_func( _nebula_random_walk, [nodes, edge_types, p, q, default_node], [tf.int64], True,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "edge_types, p, q, default_node) edge_types = [type_ops.get_edge_type_id(edge_type) for edge_type in edge_types] return _random_walk(nodes,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "types p: back probality q: forward probality default_node: default fill nodes ''' if", "[tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1)) return result[0] def _nebula_random_walk(nodes, edge_types,", "you may not use this file except in compliance with the License. #", "Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License,", "base._LIB_OP.random_walk def random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1): ''' Random walk from a list", "division from __future__ import print_function import tensorflow as tf from tf_euler.python.euler_ops import base", "p=={} and q=={}'.format( base.nebula_space, len(edge_types), ', '.join(str(x) for x in uniq_nodes), ', '.join(str('e_'", "is not None: for row in resp.rows: path = row.columns[0].get_str() path_nodes = map(lambda", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "# limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__", "the License. 
# ============================================================================== from __future__ import absolute_import from __future__ import division from", "start node ids, 1-d Tensor edge_types: list of 1-d Tensor of edge types", "default_node): paths = [] uniq_nodes = {}.fromkeys(nodes).keys() nql = 'USE {}; randomwalk {}", "default_node), path.split('#')) path_cache[path_nodes[0]] = path_nodes for node in nodes: paths.append(path_cache[node]) return np.asarray(paths, np.int64)", "tf.py_func( _nebula_random_walk, [nodes, edge_types, p, q, default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types)", "and q=={}'.format( base.nebula_space, len(edge_types), ', '.join(str(x) for x in uniq_nodes), ', '.join(str('e_' +", "back probality q: forward probality default_node: default fill nodes ''' if base.nebula_ops['random_walk']: return", "License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__", "base.nebula_client.execute_query(nql) if resp.rows is not None: for row in resp.rows: path = row.columns[0].get_str()", "edge_types: list of 1-d Tensor of edge types p: back probality q: forward", "x: long(x if x != '-1' else default_node), path.split('#')) path_cache[path_nodes[0]] = path_nodes for", "resp = base.nebula_client.execute_query(nql) if resp.rows is not None: for row in resp.rows: path", "ANY KIND, either express or implied. 
# See the License for the specific", "probality q: forward probality default_node: default fill nodes ''' if base.nebula_ops['random_walk']: return nebula_random_walk(nodes,", "from tf_euler.python.euler_ops import base from tf_euler.python.euler_ops import type_ops import numpy as np gen_pair", "= 'USE {}; randomwalk {} from {} over {} where p=={} and q=={}'.format(", "base._LIB_OP.gen_pair _random_walk = base._LIB_OP.random_walk def random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1): ''' Random walk", "absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf", "None: for row in resp.rows: path = row.columns[0].get_str() path_nodes = map(lambda x: long(x", "x != '-1' else default_node), path.split('#')) path_cache[path_nodes[0]] = path_nodes for node in nodes:", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in compliance with the License. # You may obtain a copy of the", "walk from a list of nodes. Args: nodes: start node ids, 1-d Tensor", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "in edge_types] return _random_walk(nodes, edge_types, p, q, default_node) def nebula_random_walk(nodes, edge_types, p=1.0, q=1.0,", "use this file except in compliance with the License. # You may obtain", "nebula_random_walk(nodes, edge_types, p, q, default_node) edge_types = [type_ops.get_edge_type_id(edge_type) for edge_type in edge_types] return", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "__future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow", "list of 1-d Tensor of edge types p: back probality q: forward probality", "tf from tf_euler.python.euler_ops import base from tf_euler.python.euler_ops import type_ops import numpy as np", "See the License for the specific language governing permissions and # limitations under", "nodes. Args: nodes: start node ids, 1-d Tensor edge_types: list of 1-d Tensor", "edge_types, p, q, default_node): paths = [] uniq_nodes = {}.fromkeys(nodes).keys() nql = 'USE", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "= row.columns[0].get_str() path_nodes = map(lambda x: long(x if x != '-1' else default_node),", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "from __future__ import division from __future__ import print_function import tensorflow as tf from", "import tensorflow as tf from tf_euler.python.euler_ops import base from tf_euler.python.euler_ops import type_ops import", "limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import", ") path_cache = {} resp = base.nebula_client.execute_query(nql) if resp.rows is not None: for", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import absolute_import from __future__ import division from __future__ import print_function import tensorflow as", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "governing permissions and # limitations under the License. 
def random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):
    '''
    Random walk from a list of start nodes.

    Args:
      nodes: start node ids, 1-d Tensor.
      edge_types: list of 1-d Tensors of edge types, one entry per walk step.
      p: return parameter (back probability) of the node2vec-style walk.
      q: in-out parameter (forward probability) of the walk.
      default_node: node id used to fill a walk that terminates early.

    Returns:
      A 2-d int64 Tensor of shape [num_nodes, len(edge_types) + 1] holding
      one walk per start node.
    '''
    # Route to the Nebula-graph backed implementation when it is enabled
    # for this op; otherwise fall back to the native library op.
    if base.nebula_ops['random_walk']:
        return nebula_random_walk(nodes, edge_types, p, q, default_node)
    # Translate symbolic edge-type names into internal ids expected by
    # the native random-walk op.
    edge_types = [type_ops.get_edge_type_id(edge_type)
                  for edge_type in edge_types]
    return _random_walk(nodes, edge_types, p, q, default_node)
def nebula_random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):
    '''
    Random walk served by the Nebula graph backend.

    Wraps the Python-side `_nebula_random_walk` in a stateful `tf.py_func`
    and pins the static shape of the result to
    [batch_size, len(edge_types) + 1].
    '''
    walk = tf.py_func(
        _nebula_random_walk,
        [nodes, edge_types, p, q, default_node],
        [tf.int64],
        True,
        'NebulaRandomWalk')
    paths = walk[0]
    # py_func loses static shape information; restore it so downstream
    # graph construction can rely on a known batch/walk-length shape.
    paths.set_shape((nodes.shape.dims[0].value, len(edge_types) + 1))
    return paths
resp.rows is", "or agreed to in writing, software # distributed under the License is distributed", "q, default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1)) return result[0] def", "= base.nebula_client.execute_query(nql) if resp.rows is not None: for row in resp.rows: path =", "as tf from tf_euler.python.euler_ops import base from tf_euler.python.euler_ops import type_ops import numpy as", "or implied. # See the License for the specific language governing permissions and", "default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1)) return result[0] def _nebula_random_walk(nodes,", "of nodes. Args: nodes: start node ids, 1-d Tensor edge_types: list of 1-d", "edge_types] return _random_walk(nodes, edge_types, p, q, default_node) def nebula_random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):", "q=1.0, default_node=-1): ''' Random walk from a list of nodes. Args: nodes: start", "edge_types, p, q, default_node) def nebula_random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1): result = tf.py_func(", "permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "a list of nodes. Args: nodes: start node ids, 1-d Tensor edge_types: list", "and # limitations under the License. 
# ============================================================================== from __future__ import absolute_import from", "_nebula_random_walk, [nodes, edge_types, p, q, default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) +", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1)) return result[0] def _nebula_random_walk(nodes, edge_types, p,", "= tf.py_func( _nebula_random_walk, [nodes, edge_types, p, q, default_node], [tf.int64], True, 'NebulaRandomWalk' ) result[0].set_shape((nodes.shape.dims[0].value,", "{}; randomwalk {} from {} over {} where p=={} and q=={}'.format( base.nebula_space, len(edge_types),", "not None: for row in resp.rows: path = row.columns[0].get_str() path_nodes = map(lambda x:", "{} over {} where p=={} and q=={}'.format( base.nebula_space, len(edge_types), ', '.join(str(x) for x", "p: back probality q: forward probality default_node: default fill nodes ''' if base.nebula_ops['random_walk']:", "with the License. # You may obtain a copy of the License at", "numpy as np gen_pair = base._LIB_OP.gen_pair _random_walk = base._LIB_OP.random_walk def random_walk(nodes, edge_types, p=1.0,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "fill nodes ''' if base.nebula_ops['random_walk']: return nebula_random_walk(nodes, edge_types, p, q, default_node) edge_types =", "Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version", "from tf_euler.python.euler_ops import type_ops import numpy as np gen_pair = base._LIB_OP.gen_pair _random_walk =", "in writing, software # distributed under the License is distributed on an \"AS", "as np gen_pair = base._LIB_OP.gen_pair _random_walk = base._LIB_OP.random_walk def random_walk(nodes, edge_types, p=1.0, q=1.0,", "result[0] def _nebula_random_walk(nodes, edge_types, p, q, default_node): paths = [] uniq_nodes = {}.fromkeys(nodes).keys()", "in uniq_nodes), ', '.join(str('e_' + x) for x in edge_types[0]), p, q )", "base from tf_euler.python.euler_ops import type_ops import numpy as np gen_pair = base._LIB_OP.gen_pair _random_walk", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "return result[0] def _nebula_random_walk(nodes, edge_types, p, q, default_node): paths = [] uniq_nodes =" ]